VOL-1910 added techprofile mock struct to cover openolt_flowmgr.go files
Change-Id: I0bd44890f02909da870771cc332f2a5de264020b
diff --git a/Gopkg.lock b/Gopkg.lock
index 9e946d7..dd1ecc7 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -227,7 +227,7 @@
[[projects]]
branch = "master"
- digest = "1:9302a6372545bf96d2aaa1f6ce2eb9f6bf815f582bd63753cc378de6405f85a0"
+ digest = "1:955257b92eafddae06807b037b66c0bbe34f3eb828dce0b7424e85d1270719ba"
name = "github.com/opencord/voltha-go"
packages = [
"adapters",
@@ -242,10 +242,10 @@
"rw_core/utils",
]
pruneopts = "UT"
- revision = "4c9e559d974d5a8cf30e9ba6425547303b9a5d0d"
+ revision = "676f0ddf24b3d5be9bdcadd89f2c8cc503468df8"
[[projects]]
- digest = "1:78a853c38b4935f01a99fbb41edbb4382a23586f929ecf02dd80d4452f2e3c0d"
+ digest = "1:bc21a70b7c12a8d8c2b008d2d61a313bf4ed8f021be84d815f744a869e69c96f"
name = "github.com/opencord/voltha-protos"
packages = [
"go/common",
@@ -257,8 +257,8 @@
"go/voltha",
]
pruneopts = "UT"
- revision = "792553b747df7f751d864f7c638aa5b667c0993e"
- version = "1.0.0"
+ revision = "b6b797cceae987556bc705acc6273b1c7e43ce05"
+ version = "v1.0.3"
[[projects]]
digest = "1:f690a0a27cefae695fa9587aa3ed23652e593be1d98b35f8184d10bccec30444"
@@ -454,14 +454,6 @@
version = "v1.22.0"
[[projects]]
- digest = "1:b449dbaada891dc97f016bc3519bb1af0e1a4296828f7a560a0ba47ea75f20bb"
- name = "gopkg.in/Shopify/sarama.v1"
- packages = ["."]
- pruneopts = "UT"
- revision = "dde3ddda8b4b3a594690086725799ab1573bb895"
- version = "v1.23.0"
-
-[[projects]]
digest = "1:c902038ee2d6f964d3b9f2c718126571410c5d81251cbab9fe58abd37803513c"
name = "gopkg.in/jcmturner/aescts.v1"
packages = ["."]
diff --git a/adaptercore/device_handler_test.go b/adaptercore/device_handler_test.go
index d6c65ea..d5facd5 100644
--- a/adaptercore/device_handler_test.go
+++ b/adaptercore/device_handler_test.go
@@ -124,6 +124,7 @@
ep := &mocks.MockEventProxy{}
openOLT := &OpenOLT{coreProxy: cp, adapterProxy: ap, eventProxy: ep}
dh := NewDeviceHandler(cp, ap, ep, device, openOLT)
+ dh.nniIntfID = 1
deviceInf := &oop.DeviceInfo{Vendor: "openolt", Ranges: nil, Model: "openolt", DeviceId: dh.deviceID}
dh.resourceMgr = &resourcemanager.OpenOltResourceMgr{DeviceID: dh.deviceID, DeviceType: dh.deviceType, DevInfo: deviceInf}
dh.flowMgr = NewFlowManager(dh, dh.resourceMgr)
diff --git a/adaptercore/openolt_flowmgr.go b/adaptercore/openolt_flowmgr.go
index 50fae3c..692def3 100644
--- a/adaptercore/openolt_flowmgr.go
+++ b/adaptercore/openolt_flowmgr.go
@@ -161,7 +161,7 @@
//OpenOltFlowMgr creates the Structure of OpenOltFlowMgr obj
type OpenOltFlowMgr struct {
- techprofile []*tp.TechProfileMgr
+ techprofile []tp.TechProfileIf
deviceHandler *DeviceHandler
resourceMgr *rsrcMgr.OpenOltResourceMgr
onuIds map[onuIDKey]onuInfo //OnuId -> OnuInfo
@@ -177,7 +177,7 @@
var flowMgr OpenOltFlowMgr
flowMgr.deviceHandler = dh
flowMgr.resourceMgr = rsrcMgr
- flowMgr.techprofile = make([]*tp.TechProfileMgr, MaxPonPorts)
+ flowMgr.techprofile = make([]tp.TechProfileIf, MaxPonPorts)
if err := flowMgr.populateTechProfilePerPonPort(); err != nil {
log.Error("Error while populating tech profile mgr\n")
return nil
diff --git a/adaptercore/openolt_flowmgr_test.go b/adaptercore/openolt_flowmgr_test.go
index 93a2e0e..ee25250 100644
--- a/adaptercore/openolt_flowmgr_test.go
+++ b/adaptercore/openolt_flowmgr_test.go
@@ -18,24 +18,28 @@
package adaptercore
import (
- "fmt"
"testing"
+ "github.com/opencord/voltha-protos/go/voltha"
+
"github.com/opencord/voltha-go/common/log"
tp "github.com/opencord/voltha-go/common/techprofile"
"github.com/opencord/voltha-go/db/model"
fu "github.com/opencord/voltha-go/rw_core/utils"
"github.com/opencord/voltha-openolt-adapter/adaptercore/resourcemanager"
+ rsrcMgr "github.com/opencord/voltha-openolt-adapter/adaptercore/resourcemanager"
"github.com/opencord/voltha-openolt-adapter/mocks"
ofp "github.com/opencord/voltha-protos/go/openflow_13"
"github.com/opencord/voltha-protos/go/openolt"
openoltpb2 "github.com/opencord/voltha-protos/go/openolt"
tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
- "github.com/opencord/voltha-protos/go/voltha"
)
+var flowMgr *OpenOltFlowMgr
+
func init() {
log.SetDefaultLogger(log.JSON, log.DebugLevel, nil)
+ flowMgr = newMockFlowmgr()
}
func newMockResourceMgr() *resourcemanager.OpenOltResourceMgr {
ranges := []*openolt.DeviceInfo_DeviceResourceRanges{
@@ -48,6 +52,11 @@
Ranges: ranges,
}
rsrMgr := resourcemanager.NewResourceMgr("olt", "127.0.0.1:2379", "etcd", "olt", deviceinfo)
+ for key := range rsrMgr.ResourceMgrs {
+ rsrMgr.ResourceMgrs[key].KVStore = &model.Backend{}
+ rsrMgr.ResourceMgrs[key].KVStore.Client = &mocks.MockKVClient{}
+ rsrMgr.ResourceMgrs[key].TechProfileMgr = mocks.MockTechProfile{TpID: key}
+ }
return rsrMgr
}
@@ -80,15 +89,16 @@
packetInGemPort[packetInInfoKey{intfID: 2, onuID: 2, logicalPort: 2}] = 2
flwMgr.packetInGemPort = packetInGemPort
- tps := make([]*tp.TechProfileMgr, len(rsrMgr.ResourceMgrs))
- for key, val := range rsrMgr.ResourceMgrs {
- tps[key] = val.TechProfileMgr
+ tps := make([]tp.TechProfileIf, len(rsrMgr.ResourceMgrs))
+ for key := range rsrMgr.ResourceMgrs {
+ tps[key] = mocks.MockTechProfile{TpID: key}
}
flwMgr.techprofile = tps
return flwMgr
}
+
func TestOpenOltFlowMgr_CreateSchedulerQueues(t *testing.T) {
- flowMgr := newMockFlowmgr()
+ // flowMgr := newMockFlowmgr()
tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
ProfileType: "pt1", NumGemPorts: 1, NumTconts: 1, Version: 1,
@@ -135,7 +145,7 @@
//Negative testcases
{"CreateSchedulerQueues-7", args{Dir: tp_pb.Direction_UPSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile, MeterID: 1, flowMetadata: &voltha.FlowMetadata{}}, true},
{"CreateSchedulerQueues-8", args{Dir: tp_pb.Direction_UPSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile, MeterID: 0, flowMetadata: &voltha.FlowMetadata{}}, true},
- {"CreateSchedulerQueues-9", args{Dir: tp_pb.Direction_DOWNSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile2, MeterID: 1, flowMetadata: &voltha.FlowMetadata{}}, true},
+ {"CreateSchedulerQueues-9", args{Dir: tp_pb.Direction_DOWNSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile2, MeterID: 1, flowMetadata: &voltha.FlowMetadata{}}, false},
{"CreateSchedulerQueues-10", args{Dir: tp_pb.Direction_UPSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile, MeterID: 2, flowMetadata: &voltha.FlowMetadata{}}, true},
{"CreateSchedulerQueues-11", args{Dir: tp_pb.Direction_DOWNSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile2, MeterID: 2, flowMetadata: &voltha.FlowMetadata{}}, true},
{"CreateSchedulerQueues-12", args{Dir: tp_pb.Direction_DOWNSTREAM, IntfID: 1, OnuID: 1, UniID: 1, UniPort: 1, TpInst: tprofile2, MeterID: 2}, true},
@@ -151,7 +161,7 @@
func TestOpenOltFlowMgr_RemoveSchedulerQueues(t *testing.T) {
- flowMgr := newMockFlowmgr()
+ // flowMgr := newMockFlowmgr()
tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
ProfileType: "pt1", NumGemPorts: 1, NumTconts: 1, Version: 1,
InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
@@ -193,11 +203,12 @@
}
})
}
+
}
func TestOpenOltFlowMgr_RemoveFlow(t *testing.T) {
- flowMgr := newMockFlowmgr()
-
+ // flowMgr := newMockFlowmgr()
+ log.Debug("Info Warning Error: Starting RemoveFlow() test")
fa := &fu.FlowArgs{
MatchFields: []*ofp.OfpOxmOfbField{
fu.InPort(2),
@@ -211,6 +222,36 @@
},
}
ofpstats := fu.MkFlowStat(fa)
+ ofpstats.Cookie = ofpstats.Id
+ flowMgr.storedDeviceFlows = append(flowMgr.storedDeviceFlows, *ofpstats)
+ lldpFa := &fu.FlowArgs{
+ KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1),
+ fu.EthType(0x88CC),
+ fu.TunnelId(536870912),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+ },
+ }
+ lldpofpstats := fu.MkFlowStat(lldpFa)
+ //lldpofpstats.Cookie = lldpofpstats.Id
+
+ dhcpFa := &fu.FlowArgs{
+ KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1),
+ fu.UdpSrc(67),
+ //fu.TunnelId(536870912),
+ fu.IpProto(17),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+ },
+ }
+ dhcpofpstats := fu.MkFlowStat(dhcpFa)
+ //dhcpofpstats.Cookie = dhcpofpstats.Id
type args struct {
flow *ofp.OfpFlowStats
}
@@ -220,23 +261,58 @@
}{
// TODO: Add test cases.
{"RemoveFlow", args{flow: ofpstats}},
+ {"RemoveFlow", args{flow: lldpofpstats}},
+ {"RemoveFlow", args{flow: dhcpofpstats}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
flowMgr.RemoveFlow(tt.args.flow)
})
}
+ // t.Error("=====")
}
func TestOpenOltFlowMgr_AddFlow(t *testing.T) {
-
- flowMgr := newMockFlowmgr()
+ // flowMgr := newMockFlowmgr()
kw := make(map[string]uint64)
kw["table_id"] = 1
kw["meter_id"] = 1
- kw["write_metadata"] = 2
+ kw["write_metadata"] = 0x4000000000 // Tech-Profile-ID 64
+
+ // Upstream flow
fa := &fu.FlowArgs{
MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(536870912),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+ fu.Output(65536),
+ fu.PushVlan(0x8100),
+ },
+ KV: kw,
+ }
+
+ // Downstream flow
+ fa3 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(65536),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ //fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 101)),
+ fu.PopVlan(),
+ fu.Output(536870912),
+ },
+ KV: kw,
+ }
+
+ fa2 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
fu.InPort(1000),
fu.Metadata_ofp(1),
fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
@@ -249,11 +325,145 @@
KV: kw,
}
+ // TODO Add LLDP flow
+ // TODO Add DHCP flow
+
+ // Flows for negative scenarios
+ // Failure in formulateActionInfoFromFlow()
+ fa4 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1000),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Experimenter(257, []byte{1, 2, 3, 4}),
+ },
+ KV: kw,
+ }
+
+ // Invalid Output
+ fa5 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1000),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Output(0),
+ },
+ KV: kw,
+ }
+
+ // Tech-Profile-ID update (not supported)
+ kw6 := make(map[string]uint64)
+ kw6["table_id"] = 1
+ kw6["meter_id"] = 1
+ kw6["write_metadata"] = 0x4100000000 // TpID Other than the stored one
+ fa6 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(536870912),
+ fu.TunnelId(16),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+ fu.Output(65535),
+ },
+ KV: kw6,
+ }
+
+ lldpFa := &fu.FlowArgs{
+ KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1),
+ fu.EthType(0x88CC),
+ fu.TunnelId(536870912),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+ },
+ }
+
+ dhcpFa := &fu.FlowArgs{
+ KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1),
+ fu.UdpSrc(67),
+ //fu.TunnelId(536870912),
+ fu.IpProto(17),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+ },
+ }
+ igmpFa := &fu.FlowArgs{
+ KV: fu.OfpFlowModArgs{"priority": 1000, "cookie": 48132224281636694},
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(1),
+ fu.UdpSrc(67),
+ //fu.TunnelId(536870912),
+ fu.IpProto(2),
+ },
+ Actions: []*ofp.OfpAction{
+ fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
+ },
+ }
+
+ fa9 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(536870912),
+ fu.TunnelId(16),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ fu.VlanPcp(1000),
+ fu.UdpDst(65535),
+ fu.UdpSrc(536870912),
+ fu.Ipv4Dst(65535),
+ fu.Ipv4Src(536870912),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+ fu.Output(65535),
+ },
+ KV: kw6,
+ }
+
+ fa10 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(65533),
+ // fu.TunnelId(16),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ fu.VlanPcp(1000),
+ fu.UdpDst(65535),
+ fu.UdpSrc(536870912),
+ fu.Ipv4Dst(65535),
+ fu.Ipv4Src(536870912),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+ fu.Output(65535),
+ },
+ KV: kw6,
+ }
ofpstats := fu.MkFlowStat(fa)
- fmt.Println("ofpstats ", ofpstats)
- //ofpstats.Dat
+ ofpstats2 := fu.MkFlowStat(fa2)
+ ofpstats3 := fu.MkFlowStat(fa3)
+ ofpstats4 := fu.MkFlowStat(fa4)
+ ofpstats5 := fu.MkFlowStat(fa5)
+ ofpstats6 := fu.MkFlowStat(fa6)
+ ofpstats7 := fu.MkFlowStat(lldpFa)
+ ofpstats8 := fu.MkFlowStat(dhcpFa)
+ ofpstats9 := fu.MkFlowStat(fa9)
+ ofpstats10 := fu.MkFlowStat(fa10)
+ igmpstats := fu.MkFlowStat(igmpFa)
+
ofpMeterConfig := &ofp.OfpMeterConfig{Flags: 1, MeterId: 1}
- //ofpWritemetaData := &ofp.ofp
flowMetadata := &voltha.FlowMetadata{
Meters: []*ofp.OfpMeterConfig{ofpMeterConfig},
}
@@ -267,6 +477,17 @@
}{
// TODO: Add test cases.
{"AddFlow", args{flow: ofpstats, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats2, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats3, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats4, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats5, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats6, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats7, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats8, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats9, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: igmpstats, flowMetadata: flowMetadata}},
+ {"AddFlow", args{flow: ofpstats10, flowMetadata: flowMetadata}},
+ //ofpstats10
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -276,7 +497,7 @@
}
func TestOpenOltFlowMgr_UpdateOnuInfo(t *testing.T) {
- flowMgr := newMockFlowmgr()
+ // flowMgr := newMockFlowmgr()
type args struct {
intfID uint32
onuID uint32
@@ -299,7 +520,7 @@
}
func TestOpenOltFlowMgr_GetLogicalPortFromPacketIn(t *testing.T) {
- flowMgr := newMockFlowmgr()
+ // flowMgr := newMockFlowmgr()
type args struct {
packetIn *openoltpb2.PacketIndication
}
@@ -332,7 +553,7 @@
}
func TestOpenOltFlowMgr_GetPacketOutGemPortID(t *testing.T) {
- flwMgr := newMockFlowmgr()
+ // flwMgr := newMockFlowmgr()
type args struct {
intfID uint32
@@ -353,7 +574,7 @@
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := flwMgr.GetPacketOutGemPortID(tt.args.intfID, tt.args.onuID, tt.args.portNum)
+ got, err := flowMgr.GetPacketOutGemPortID(tt.args.intfID, tt.args.onuID, tt.args.portNum)
if (err != nil) != tt.wantErr {
t.Errorf("OpenOltFlowMgr.GetPacketOutGemPortID() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -367,7 +588,7 @@
}
func TestOpenOltFlowMgr_DeleteTechProfileInstance(t *testing.T) {
- flwMgr := newMockFlowmgr()
+ // flwMgr := newMockFlowmgr()
type args struct {
intfID uint32
onuID uint32
@@ -380,14 +601,270 @@
wantErr bool
}{
// TODO: Add test cases.
- {"DeleteTechProfileInstance", args{intfID: 0, onuID: 1, uniID: 1, sn: ""}, true},
+ {"DeleteTechProfileInstance", args{intfID: 0, onuID: 1, uniID: 1, sn: ""}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if err := flwMgr.DeleteTechProfileInstance(tt.args.intfID, tt.args.onuID, tt.args.uniID, tt.args.sn); (err != nil) != tt.wantErr {
+ if err := flowMgr.DeleteTechProfileInstance(tt.args.intfID, tt.args.onuID, tt.args.uniID, tt.args.sn); (err != nil) != tt.wantErr {
t.Errorf("OpenOltFlowMgr.DeleteTechProfileInstance() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
+
+func TestOpenOltFlowMgr_checkAndAddFlow(t *testing.T) {
+ // flowMgr := newMockFlowmgr()
+ kw := make(map[string]uint64)
+ kw["table_id"] = 1
+ kw["meter_id"] = 1
+ kw["write_metadata"] = 0x4000000000 // Tech-Profile-ID 64
+
+ // Upstream flow
+ fa := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(536870912),
+ fu.Metadata_ofp(1),
+ fu.IpProto(17), // dhcp
+ fu.VlanPcp(257),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+ fu.Output(65536),
+ fu.PushVlan(0x8100),
+ },
+ KV: kw,
+ }
+
+ // EAPOL
+ fa2 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(536870912),
+ fu.Metadata_ofp(1),
+ fu.EthType(0x888E),
+ fu.VlanPcp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 257)),
+ fu.Output(65536),
+ fu.PushVlan(0x8100),
+ },
+ KV: kw,
+ }
+
+ // HSIA
+ fa3 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(536870912),
+ fu.Metadata_ofp(1),
+ //fu.EthType(0x8100),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0)),
+ fu.Output(65536),
+ fu.PushVlan(0x8100),
+ },
+ KV: kw,
+ }
+
+ fa4 := &fu.FlowArgs{
+ MatchFields: []*ofp.OfpOxmOfbField{
+ fu.InPort(65535),
+ fu.Metadata_ofp(1),
+ fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0),
+ fu.VlanPcp(1),
+ },
+ Actions: []*ofp.OfpAction{
+ //fu.SetField(fu.Metadata_ofp(uint64(ofp.OfpInstructionType_OFPIT_WRITE_METADATA | 2))),
+ fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 0)),
+ fu.Output(536870912),
+ fu.PopVlan(),
+ },
+ KV: kw,
+ }
+
+ classifierInfo := make(map[string]interface{})
+ actionInfo := make(map[string]interface{})
+ classifierInfo2 := make(map[string]interface{})
+ actionInfo2 := make(map[string]interface{})
+ classifierInfo3 := make(map[string]interface{})
+ actionInfo3 := make(map[string]interface{})
+ classifierInfo4 := make(map[string]interface{})
+ actionInfo4 := make(map[string]interface{})
+ flowState := fu.MkFlowStat(fa)
+ flowState2 := fu.MkFlowStat(fa2)
+ flowState3 := fu.MkFlowStat(fa3)
+ flowState4 := fu.MkFlowStat(fa4)
+ formulateClassifierInfoFromFlow(classifierInfo, flowState)
+ formulateClassifierInfoFromFlow(classifierInfo2, flowState2)
+ formulateClassifierInfoFromFlow(classifierInfo3, flowState3)
+ formulateClassifierInfoFromFlow(classifierInfo4, flowState4)
+
+ err := formulateActionInfoFromFlow(actionInfo, classifierInfo, flowState)
+ if err != nil {
+ // Error logging is already done in the called function
+ // So just return in case of error
+ return
+ }
+
+ err = formulateActionInfoFromFlow(actionInfo2, classifierInfo2, flowState2)
+ if err != nil {
+ // Error logging is already done in the called function
+ // So just return in case of error
+ return
+ }
+
+ err = formulateActionInfoFromFlow(actionInfo3, classifierInfo3, flowState3)
+ if err != nil {
+ // Error logging is already done in the called function
+ // So just return in case of error
+ return
+ }
+
+ err = formulateActionInfoFromFlow(actionInfo4, classifierInfo4, flowState4)
+ if err != nil {
+ // Error logging is already done in the called function
+ // So just return in case of error
+ return
+ }
+
+ //ofpMeterConfig := &ofp.OfpMeterConfig{Flags: 1, MeterId: 1}
+ //flowMetadata := &voltha.FlowMetadata{
+ // Meters: []*ofp.OfpMeterConfig{ofpMeterConfig},
+ //}
+
+ TpInst := &tp.TechProfile{
+ Name: "Test-Tech-Profile",
+ SubscriberIdentifier: "257",
+ ProfileType: "Mock",
+ Version: 1,
+ NumGemPorts: 4,
+ NumTconts: 1,
+ InstanceCtrl: tp.InstanceControl{
+ Onu: "1",
+ Uni: "16",
+ },
+ }
+
+ type fields struct {
+ techprofile []tp.TechProfileIf
+ deviceHandler *DeviceHandler
+ resourceMgr *rsrcMgr.OpenOltResourceMgr
+ onuIds map[onuIDKey]onuInfo
+ onuSerialNumbers map[string]onuInfo
+ onuGemPortIds map[gemPortKey]onuInfo
+ packetInGemPort map[packetInInfoKey]uint32
+ storedDeviceFlows []ofp.OfpFlowStats
+ }
+ type args struct {
+ args map[string]uint32
+ classifierInfo map[string]interface{}
+ actionInfo map[string]interface{}
+ flow *ofp.OfpFlowStats
+ gemPort uint32
+ intfID uint32
+ onuID uint32
+ uniID uint32
+ portNo uint32
+ TpInst *tp.TechProfile
+ allocID []uint32
+ gemPorts []uint32
+ TpID uint32
+ uni string
+ }
+ tests := []struct {
+ name string
+ fields fields
+ args args
+ }{
+ {
+ name: "checkAndAddFlow-1",
+ args: args{
+ args: nil,
+ classifierInfo: classifierInfo,
+ actionInfo: actionInfo,
+ flow: flowState,
+ gemPort: 1,
+ intfID: 1,
+ onuID: 1,
+ uniID: 16,
+ portNo: 1,
+ TpInst: TpInst,
+ allocID: []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+ gemPorts: []uint32{1, 2, 3, 4},
+ TpID: 64,
+ uni: "16",
+ },
+ },
+ {
+ name: "checkAndAddFlow-2",
+ args: args{
+ args: nil,
+ classifierInfo: classifierInfo2,
+ actionInfo: actionInfo2,
+ flow: flowState2,
+ gemPort: 1,
+ intfID: 1,
+ onuID: 1,
+ uniID: 16,
+ portNo: 1,
+ TpInst: TpInst,
+ allocID: []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+ gemPorts: []uint32{1, 2, 3, 4},
+ TpID: 64,
+ uni: "16",
+ },
+ },
+ {
+ name: "checkAndAddFlow-3",
+ args: args{
+ args: nil,
+ classifierInfo: classifierInfo3,
+ actionInfo: actionInfo3,
+ flow: flowState3,
+ gemPort: 1,
+ intfID: 1,
+ onuID: 1,
+ uniID: 16,
+ portNo: 1,
+ TpInst: TpInst,
+ allocID: []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+ gemPorts: []uint32{1, 2, 3, 4},
+ TpID: 64,
+ uni: "16",
+ },
+ },
+ {
+ name: "checkAndAddFlow-4",
+ args: args{
+ args: nil,
+ classifierInfo: classifierInfo4,
+ actionInfo: actionInfo4,
+ flow: flowState4,
+ gemPort: 1,
+ intfID: 1,
+ onuID: 1,
+ uniID: 16,
+ portNo: 1,
+ TpInst: TpInst,
+ allocID: []uint32{0x8001, 0x8002, 0x8003, 0x8004},
+ gemPorts: []uint32{1, 2, 3, 4},
+ TpID: 64,
+ uni: "16",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ flowMgr.checkAndAddFlow(tt.args.args, tt.args.classifierInfo, tt.args.actionInfo, tt.args.flow, tt.args.gemPort,
+ tt.args.intfID, tt.args.onuID, tt.args.uniID, tt.args.portNo, tt.args.TpInst, tt.args.allocID, tt.args.gemPorts,
+ tt.args.TpID, tt.args.uni)
+ })
+ }
+}
diff --git a/mocks/mockKVClient.go b/mocks/mockKVClient.go
index e11a756..2417f51 100644
--- a/mocks/mockKVClient.go
+++ b/mocks/mockKVClient.go
@@ -23,8 +23,12 @@
"strconv"
"strings"
+ "github.com/opencord/voltha-go/common/log"
+ "github.com/opencord/voltha-openolt-adapter/adaptercore/resourcemanager"
+
"github.com/opencord/voltha-go/db/kvstore"
ofp "github.com/opencord/voltha-protos/go/openflow_13"
+ openolt "github.com/opencord/voltha-protos/go/openolt"
)
const (
@@ -32,6 +36,16 @@
MeterConfig = "meter_id"
// TpIDPathSuffix to extract Techprofile
TpIDPathSuffix = "tp_id"
+ // FlowIDpool to extract Flow ids
+ FlowIDpool = "flow_id_pool"
+ // FlowIDs to extract flow_ids
+ FlowIDs = "flow_ids"
+ // FlowIDInfo to extract flowId info
+ FlowIDInfo = "flow_id_info"
+ // GemportIDs to gemport_ids
+ GemportIDs = "gemport_ids"
+ // AllocIDs to extract alloc_ids
+ AllocIDs = "alloc_ids"
)
// MockKVClient mocks the AdapterProxy interface.
@@ -50,8 +64,17 @@
// Get mock function implementation for KVClient
func (kvclient *MockKVClient) Get(key string, timeout int, lock ...bool) (*kvstore.KVPair, error) {
+ log.Debugw("Warning Warning Warning: Get of MockKVClient called", log.Fields{"key": key})
if key != "" {
-
+ log.Debug("Warning Key Not Blank")
+ if strings.Contains(key, "meter_id/{0,62,8}/{upstream}") {
+ meterConfig := ofp.OfpMeterConfig{
+ Flags: 0,
+ MeterId: 1,
+ }
+ str, _ := json.Marshal(meterConfig)
+ return kvstore.NewKVPair(key, string(str), "mock", 3000, 1), nil
+ }
if strings.Contains(key, MeterConfig) {
var bands []*ofp.OfpMeterBandHeader
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DSCP_REMARK,
@@ -60,15 +83,23 @@
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DSCP_REMARK,
Rate: 1024, Data: &ofp.OfpMeterBandHeader_DscpRemark{DscpRemark: &ofp.OfpMeterBandDscpRemark{PrecLevel: 3}}})
- // bands = append(bands, &ofp.OfpMeterBandHeader{})
- // Data: &ofp.OfpMeterBandHeader_Drop{Drop: &ofp.OfpMeterBandDrop{}}
sep := strings.Split(key, "/")[2]
val, _ := strconv.ParseInt(strings.Split(sep, ",")[1], 10, 32)
if uint32(val) > 1 {
meterConfig := &ofp.OfpMeterConfig{MeterId: uint32(val), Bands: bands}
str, _ := json.Marshal(meterConfig)
- //json.marshall()
- return kvstore.NewKVPair(key, string(str), "mock", 3000, 1), nil
+
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
+ }
+
+ if strings.Contains(key, "meter_id/{1,1,1}/{downstream}") {
+
+ band1 := &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 1000, BurstSize: 5000}
+ band2 := &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 2000, BurstSize: 5000}
+ bands := []*ofp.OfpMeterBandHeader{band1, band2}
+ ofpMeterConfig := &ofp.OfpMeterConfig{Flags: 1, MeterId: 1, Bands: bands}
+ str, _ := json.Marshal(ofpMeterConfig)
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
if uint32(val) == 1 {
return nil, nil
@@ -76,6 +107,43 @@
return nil, errors.New("invalid meter")
}
if strings.Contains(key, TpIDPathSuffix) {
+ str, _ := json.Marshal(64)
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
+ }
+ if strings.Contains(key, FlowIDpool) {
+ log.Debug("Error Error Error Key:", FlowIDpool)
+ data := make(map[string]interface{})
+ data["pool"] = "1024"
+ data["start_idx"] = 1
+ data["end_idx"] = 1024
+ str, _ := json.Marshal(data)
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
+ }
+ if strings.Contains(key, FlowIDs) {
+ data := []uint32{1, 2}
+ log.Debug("Error Error Error Key:", FlowIDs)
+ str, _ := json.Marshal(data)
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
+ }
+ if strings.Contains(key, FlowIDInfo) {
+
+ data := []resourcemanager.FlowInfo{
+ {
+ Flow: &openolt.Flow{FlowId: 1, OnuId: 1, UniId: 1, GemportId: 1},
+ FlowStoreCookie: uint64(48132224281636694),
+ },
+ }
+ log.Debug("Error Error Error Key:", FlowIDs)
+ str, _ := json.Marshal(data)
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
+ }
+ if strings.Contains(key, GemportIDs) {
+ log.Debug("Error Error Error Key:", GemportIDs)
+ str, _ := json.Marshal(1)
+ return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
+ }
+ if strings.Contains(key, AllocIDs) {
+ log.Debug("Error Error Error Key:", AllocIDs)
str, _ := json.Marshal(1)
return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
diff --git a/mocks/mockTechprofile.go b/mocks/mockTechprofile.go
new file mode 100644
index 0000000..138e7ea
--- /dev/null
+++ b/mocks/mockTechprofile.go
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package mocks provides the mocks for openolt-adapter.
+package mocks
+
+import (
+ "github.com/opencord/voltha-go/common/log"
+ tp "github.com/opencord/voltha-go/common/techprofile"
+ "github.com/opencord/voltha-go/db/model"
+ tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
+)
+
+// MockTechProfile mock struct for OpenoltClient.
+type MockTechProfile struct {
+ TpID uint32
+}
+
+// SetKVClient to mock techprofile SetKVClient method
+func (m MockTechProfile) SetKVClient() *model.Backend {
+ return &model.Backend{Client: &MockKVClient{}}
+}
+
+// GetTechProfileInstanceKVPath to mock techprofile GetTechProfileInstanceKVPath method
+func (m MockTechProfile) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+ return ""
+
+}
+
+// GetTPInstanceFromKVStore to mock techprofile GetTPInstanceFromKVStore method
+func (m MockTechProfile) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*tp.TechProfile, error) {
+ log.Debug("Warning Warning Warning: GetTPInstanceFromKVStore")
+ return nil, nil
+
+}
+
+// CreateTechProfInstance to mock techprofile CreateTechProfInstance method
+func (m MockTechProfile) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfID uint32) *tp.TechProfile {
+
+ return &tp.TechProfile{
+ Name: "mock-tech-profile",
+ SubscriberIdentifier: "257",
+ ProfileType: "mock",
+ Version: 0,
+ NumGemPorts: 2,
+ NumTconts: 1,
+ UpstreamGemPortAttributeList: nil,
+ DownstreamGemPortAttributeList: nil,
+ }
+
+}
+
+// DeleteTechProfileInstance to mock techprofile DeleteTechProfileInstance method
+func (m MockTechProfile) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
+ return nil
+}
+
+// GetprotoBufParamValue to mock techprofile GetprotoBufParamValue method
+func (m MockTechProfile) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+ return 0
+
+}
+
+// GetUsScheduler to mock techprofile GetUsScheduler method
+func (m MockTechProfile) GetUsScheduler(tpInstance *tp.TechProfile) *tp_pb.SchedulerConfig {
+ return &tp_pb.SchedulerConfig{}
+
+}
+
+// GetDsScheduler to mock techprofile GetDsScheduler method
+func (m MockTechProfile) GetDsScheduler(tpInstance *tp.TechProfile) *tp_pb.SchedulerConfig {
+ return &tp_pb.SchedulerConfig{}
+}
+
+// GetTrafficScheduler to mock techprofile GetTrafficScheduler method
+func (m MockTechProfile) GetTrafficScheduler(tpInstance *tp.TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+ ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
+ return &tp_pb.TrafficScheduler{}
+
+}
+
+// GetTrafficQueues to mock techprofile GetTrafficQueues method
+func (m MockTechProfile) GetTrafficQueues(tp *tp.TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue {
+ return []*tp_pb.TrafficQueue{{}}
+}
+
+// GetGemportIDForPbit to mock techprofile GetGemportIDForPbit method
+func (m MockTechProfile) GetGemportIDForPbit(tp *tp.TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
+ return 0
+}
diff --git a/vendor/github.com/opencord/voltha-go/common/log/log.go b/vendor/github.com/opencord/voltha-go/common/log/log.go
index 33100dc..fe3a4e0 100644
--- a/vendor/github.com/opencord/voltha-go/common/log/log.go
+++ b/vendor/github.com/opencord/voltha-go/common/log/log.go
@@ -286,6 +286,17 @@
return nil
}
+// Return a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+ i := 0
+ keys := make([]string, len(loggers))
+ for k := range loggers {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
// UpdateLogger deletes the logger associated with a caller's package and creates a new logger with the
// defaultFields. If a calling package is holding on to a Logger reference obtained from AddPackage invocation, then
// that package needs to invoke UpdateLogger if it needs to make changes to the default fields and obtain a new logger
@@ -371,6 +382,11 @@
return 0, errors.New(fmt.Sprintf("unknown-package-%s", name))
}
+//GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() int {
+ return levelToInt(cfg.Level.Level())
+}
+
//SetLogLevel sets the log level for the logger corresponding to the caller's package
func SetLogLevel(level int) error {
pkgName, _, _, _ := getCallerInfo()
@@ -382,6 +398,11 @@
return nil
}
+//SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level int) {
+ setLevel(cfg, level)
+}
+
// CleanUp flushed any buffered log entries. Applications should take care to call
// CleanUp before exiting.
func CleanUp() error {
diff --git a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
index 74734f0..1544b8d 100755
--- a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
@@ -125,7 +125,7 @@
Port int // port number for the KV store
OLTModel string
KVStore *model.Backend
- TechProfileMgr *tp.TechProfileMgr
+	TechProfileMgr tp.TechProfileIf // interface, instantiated in practice as *tp.TechProfileMgr
// Below attribute, pon_resource_ranges, should be initialized
// by reading from KV store.
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
index 2799802..9c34880 100644
--- a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
@@ -142,7 +142,7 @@
// default scheduler contants
const (
- defaultAddtionalBw = AdditionalBW_AdditionalBW_Auto
+ defaultAdditionalBw = AdditionalBW_AdditionalBW_BestEffort
defaultPriority = 0
defaultWeight = 0
defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
@@ -481,13 +481,13 @@
MaxGemPayloadSize: defaultGemPayloadSize},
UsScheduler: Scheduler{
Direction: Direction_name[Direction_UPSTREAM],
- AdditionalBw: AdditionalBW_name[defaultAddtionalBw],
+ AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
Priority: defaultPriority,
Weight: defaultWeight,
QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
DsScheduler: Scheduler{
Direction: Direction_name[Direction_DOWNSTREAM],
- AdditionalBw: AdditionalBW_name[defaultAddtionalBw],
+ AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
Priority: defaultPriority,
Weight: defaultWeight,
QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile_if.go
new file mode 100644
index 0000000..1ed38db
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile_if.go
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+ "github.com/opencord/voltha-go/db/model"
+ tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
+)
+
+type TechProfileIf interface {
+ SetKVClient() *model.Backend
+ GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
+ GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error)
+ CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile
+ DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error
+ GetprotoBufParamValue(paramType string, paramKey string) int32
+ GetUsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig
+ GetDsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig
+ GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+ ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
+ GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue
+ GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
+}
diff --git a/vendor/github.com/opencord/voltha-go/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-go/db/kvstore/etcdclient.go
index f19f365..3af1ef2 100644
--- a/vendor/github.com/opencord/voltha-go/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-go/db/kvstore/etcdclient.go
@@ -127,7 +127,15 @@
c.writeLock.Lock()
defer c.writeLock.Unlock()
- _, err := c.ectdAPI.Put(ctx, key, val)
+
+ var err error
+ // Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
+ // that KV key permanent instead of automatically removing it after a lease expiration
+ if leaseID, ok := c.keyReservations[key]; ok {
+ _, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
+ } else {
+ _, err = c.ectdAPI.Put(ctx, key, val)
+ }
cancel()
if err != nil {
switch err {
@@ -158,8 +166,8 @@
c.writeLock.Lock()
defer c.writeLock.Unlock()
- // delete the keys
- if _, err := c.ectdAPI.Delete(ctx, key, v3Client.WithPrefix()); err != nil {
+ // delete the key
+ if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
log.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
return err
}
@@ -308,7 +316,8 @@
// listen to receive Events.
func (c *EtcdClient) Watch(key string) chan *Event {
w := v3Client.NewWatcher(c.ectdAPI)
- channel := w.Watch(context.Background(), key, v3Client.WithPrefix())
+ ctx, cancel := context.WithCancel(context.Background())
+ channel := w.Watch(ctx, key)
// Create a new channel
ch := make(chan *Event, maxClientChannelBufferSize)
@@ -316,8 +325,6 @@
// Keep track of the created channels so they can be closed when required
channelMap := make(map[chan *Event]v3Client.Watcher)
channelMap[ch] = w
- //c.writeLock.Lock()
- //defer c.writeLock.Unlock()
channelMaps := c.addChannelMap(key, channelMap)
@@ -325,7 +332,7 @@
// json format.
log.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
// Launch a go routine to listen for updates
- go c.listenForKeyChange(channel, ch)
+ go c.listenForKeyChange(channel, ch, cancel)
return ch
@@ -392,7 +399,6 @@
if err := t.Close(); err != nil {
log.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
}
- close(ch)
pos = i
break
}
@@ -406,11 +412,12 @@
log.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
}
-func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event) {
+func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
log.Debug("start-listening-on-channel ...")
+ defer cancel()
+ defer close(ch)
for resp := range channel {
for _, ev := range resp.Events {
- //log.Debugf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
}
}
diff --git a/vendor/github.com/opencord/voltha-go/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-go/kafka/kafka_inter_container_library.go
index 56b5fa1..f9b3319 100644
--- a/vendor/github.com/opencord/voltha-go/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-go/kafka/kafka_inter_container_library.go
@@ -46,6 +46,9 @@
FromTopic = "fromTopic"
)
+var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
+var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
+
// requestHandlerChannel represents an interface associated with a channel. Whenever, an event is
// obtained from that channel, this interface is invoked. This is used to handle
// async requests into the Core via the kafka messaging bus
@@ -674,15 +677,20 @@
// Check for errors first
lastIndex := len(out) - 1
if out[lastIndex].Interface() != nil { // Error
- if goError, ok := out[lastIndex].Interface().(error); ok {
- returnError = &ic.Error{Reason: goError.Error()}
+ if retError, ok := out[lastIndex].Interface().(error); ok {
+ if retError.Error() == ErrorTransactionNotAcquired.Error() {
+ log.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+ return // Ignore - process is in competing mode and ignored transaction
+ }
+ returnError = &ic.Error{Reason: retError.Error()}
returnedValues = append(returnedValues, returnError)
} else { // Should never happen
returnError = &ic.Error{Reason: "incorrect-error-returns"}
returnedValues = append(returnedValues, returnError)
}
} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
- return // Ignore case - when core is in competing mode
+ log.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+ return // Ignore - should not happen
} else { // Non-error case
success = true
for idx, val := range out {
diff --git a/vendor/github.com/opencord/voltha-go/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-go/kafka/sarama_client.go
index 8037002..9e3ce0c 100755
--- a/vendor/github.com/opencord/voltha-go/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-go/kafka/sarama_client.go
@@ -18,16 +18,15 @@
import (
"errors"
"fmt"
- "strings"
- "sync"
- "time"
-
+ "github.com/Shopify/sarama"
scc "github.com/bsm/sarama-cluster"
"github.com/golang/protobuf/proto"
"github.com/google/uuid"
"github.com/opencord/voltha-go/common/log"
ic "github.com/opencord/voltha-protos/go/inter_container"
- "gopkg.in/Shopify/sarama.v1"
+ "strings"
+ "sync"
+ "time"
)
func init() {
diff --git a/vendor/github.com/opencord/voltha-protos/go/common/common.pb.go b/vendor/github.com/opencord/voltha-protos/go/common/common.pb.go
index d4dd0b0..c50262b 100644
--- a/vendor/github.com/opencord/voltha-protos/go/common/common.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/go/common/common.pb.go
@@ -122,7 +122,7 @@
}
func (AdminState_AdminState) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{3, 0}
+ return fileDescriptor_c2e3fd231961e826, []int{6, 0}
}
// Operational Status
@@ -166,7 +166,7 @@
}
func (OperStatus_OperStatus) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{4, 0}
+ return fileDescriptor_c2e3fd231961e826, []int{7, 0}
}
// Connectivity Status
@@ -198,7 +198,7 @@
}
func (ConnectStatus_ConnectStatus) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{5, 0}
+ return fileDescriptor_c2e3fd231961e826, []int{8, 0}
}
type OperationResp_OperationReturnCode int32
@@ -226,7 +226,7 @@
}
func (OperationResp_OperationReturnCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{6, 0}
+ return fileDescriptor_c2e3fd231961e826, []int{9, 0}
}
// Convey a resource identifier
@@ -340,6 +340,141 @@
var xxx_messageInfo_LogLevel proto.InternalMessageInfo
+type Logging struct {
+ Level LogLevel_LogLevel `protobuf:"varint,1,opt,name=level,proto3,enum=common.LogLevel_LogLevel" json:"level,omitempty"`
+ PackageName string `protobuf:"bytes,2,opt,name=package_name,json=packageName,proto3" json:"package_name,omitempty"`
+ ComponentName string `protobuf:"bytes,3,opt,name=component_name,json=componentName,proto3" json:"component_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Logging) Reset() { *m = Logging{} }
+func (m *Logging) String() string { return proto.CompactTextString(m) }
+func (*Logging) ProtoMessage() {}
+func (*Logging) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c2e3fd231961e826, []int{3}
+}
+
+func (m *Logging) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Logging.Unmarshal(m, b)
+}
+func (m *Logging) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Logging.Marshal(b, m, deterministic)
+}
+func (m *Logging) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Logging.Merge(m, src)
+}
+func (m *Logging) XXX_Size() int {
+ return xxx_messageInfo_Logging.Size(m)
+}
+func (m *Logging) XXX_DiscardUnknown() {
+ xxx_messageInfo_Logging.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Logging proto.InternalMessageInfo
+
+func (m *Logging) GetLevel() LogLevel_LogLevel {
+ if m != nil {
+ return m.Level
+ }
+ return LogLevel_DEBUG
+}
+
+func (m *Logging) GetPackageName() string {
+ if m != nil {
+ return m.PackageName
+ }
+ return ""
+}
+
+func (m *Logging) GetComponentName() string {
+ if m != nil {
+ return m.ComponentName
+ }
+ return ""
+}
+
+// For GetLogLevels(), select component to query
+type LoggingComponent struct {
+ ComponentName string `protobuf:"bytes,1,opt,name=component_name,json=componentName,proto3" json:"component_name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoggingComponent) Reset() { *m = LoggingComponent{} }
+func (m *LoggingComponent) String() string { return proto.CompactTextString(m) }
+func (*LoggingComponent) ProtoMessage() {}
+func (*LoggingComponent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c2e3fd231961e826, []int{4}
+}
+
+func (m *LoggingComponent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoggingComponent.Unmarshal(m, b)
+}
+func (m *LoggingComponent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoggingComponent.Marshal(b, m, deterministic)
+}
+func (m *LoggingComponent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoggingComponent.Merge(m, src)
+}
+func (m *LoggingComponent) XXX_Size() int {
+ return xxx_messageInfo_LoggingComponent.Size(m)
+}
+func (m *LoggingComponent) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoggingComponent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoggingComponent proto.InternalMessageInfo
+
+func (m *LoggingComponent) GetComponentName() string {
+ if m != nil {
+ return m.ComponentName
+ }
+ return ""
+}
+
+// For returning multiple log levels
+type Loggings struct {
+ Items []*Logging `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Loggings) Reset() { *m = Loggings{} }
+func (m *Loggings) String() string { return proto.CompactTextString(m) }
+func (*Loggings) ProtoMessage() {}
+func (*Loggings) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c2e3fd231961e826, []int{5}
+}
+
+func (m *Loggings) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Loggings.Unmarshal(m, b)
+}
+func (m *Loggings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Loggings.Marshal(b, m, deterministic)
+}
+func (m *Loggings) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Loggings.Merge(m, src)
+}
+func (m *Loggings) XXX_Size() int {
+ return xxx_messageInfo_Loggings.Size(m)
+}
+func (m *Loggings) XXX_DiscardUnknown() {
+ xxx_messageInfo_Loggings.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Loggings proto.InternalMessageInfo
+
+func (m *Loggings) GetItems() []*Logging {
+ if m != nil {
+ return m.Items
+ }
+ return nil
+}
+
type AdminState struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -350,7 +485,7 @@
func (m *AdminState) String() string { return proto.CompactTextString(m) }
func (*AdminState) ProtoMessage() {}
func (*AdminState) Descriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{3}
+ return fileDescriptor_c2e3fd231961e826, []int{6}
}
func (m *AdminState) XXX_Unmarshal(b []byte) error {
@@ -381,7 +516,7 @@
func (m *OperStatus) String() string { return proto.CompactTextString(m) }
func (*OperStatus) ProtoMessage() {}
func (*OperStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{4}
+ return fileDescriptor_c2e3fd231961e826, []int{7}
}
func (m *OperStatus) XXX_Unmarshal(b []byte) error {
@@ -412,7 +547,7 @@
func (m *ConnectStatus) String() string { return proto.CompactTextString(m) }
func (*ConnectStatus) ProtoMessage() {}
func (*ConnectStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{5}
+ return fileDescriptor_c2e3fd231961e826, []int{8}
}
func (m *ConnectStatus) XXX_Unmarshal(b []byte) error {
@@ -447,7 +582,7 @@
func (m *OperationResp) String() string { return proto.CompactTextString(m) }
func (*OperationResp) ProtoMessage() {}
func (*OperationResp) Descriptor() ([]byte, []int) {
- return fileDescriptor_c2e3fd231961e826, []int{6}
+ return fileDescriptor_c2e3fd231961e826, []int{9}
}
func (m *OperationResp) XXX_Unmarshal(b []byte) error {
@@ -492,6 +627,9 @@
proto.RegisterType((*ID)(nil), "common.ID")
proto.RegisterType((*IDs)(nil), "common.IDs")
proto.RegisterType((*LogLevel)(nil), "common.LogLevel")
+ proto.RegisterType((*Logging)(nil), "common.Logging")
+ proto.RegisterType((*LoggingComponent)(nil), "common.LoggingComponent")
+ proto.RegisterType((*Loggings)(nil), "common.Loggings")
proto.RegisterType((*AdminState)(nil), "common.AdminState")
proto.RegisterType((*OperStatus)(nil), "common.OperStatus")
proto.RegisterType((*ConnectStatus)(nil), "common.ConnectStatus")
@@ -501,41 +639,47 @@
func init() { proto.RegisterFile("voltha_protos/common.proto", fileDescriptor_c2e3fd231961e826) }
var fileDescriptor_c2e3fd231961e826 = []byte{
- // 562 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x4f, 0x4f, 0xdb, 0x4e,
- 0x10, 0x8d, 0x9d, 0x3f, 0xc0, 0x04, 0x82, 0x7f, 0xfb, 0x2b, 0x12, 0x45, 0xad, 0x14, 0xf9, 0x02,
- 0x6d, 0x05, 0x48, 0xf4, 0x56, 0xb5, 0x87, 0xc5, 0x5e, 0xd2, 0x15, 0x66, 0x37, 0x5a, 0xdb, 0x20,
- 0xf5, 0x40, 0x64, 0xe2, 0x25, 0x58, 0x4a, 0xbc, 0x56, 0xbc, 0x20, 0x71, 0xec, 0x07, 0xec, 0x57,
- 0xe8, 0x67, 0xe8, 0xa9, 0xe7, 0x6a, 0xed, 0xa0, 0x24, 0x55, 0x6f, 0x7e, 0x6f, 0x66, 0xe7, 0xcd,
- 0xf3, 0xcc, 0xc0, 0xc1, 0x93, 0x9a, 0xea, 0x87, 0x64, 0x54, 0xcc, 0x95, 0x56, 0xe5, 0xe9, 0x58,
- 0xcd, 0x66, 0x2a, 0x3f, 0xa9, 0x10, 0xea, 0xd4, 0xe8, 0xa0, 0xbf, 0x9e, 0xf3, 0x9c, 0xe4, 0x93,
- 0x91, 0x2a, 0x74, 0xa6, 0xf2, 0xb2, 0xce, 0x74, 0x5f, 0x81, 0x4d, 0x7d, 0xd4, 0x03, 0x3b, 0x4b,
- 0xf7, 0xad, 0xbe, 0x75, 0xb4, 0x25, 0xec, 0x2c, 0x75, 0x0f, 0xa1, 0x49, 0xfd, 0x12, 0xf5, 0xa1,
- 0x9d, 0x69, 0x39, 0x2b, 0xf7, 0xad, 0x7e, 0xf3, 0xa8, 0x7b, 0x06, 0x27, 0x0b, 0x11, 0xea, 0x8b,
- 0x3a, 0xe0, 0x8e, 0x61, 0x33, 0x50, 0x93, 0x40, 0x3e, 0xc9, 0xa9, 0x3b, 0x5c, 0x7e, 0xa3, 0x2d,
- 0x68, 0xfb, 0xe4, 0x3c, 0x1e, 0x38, 0x0d, 0xb4, 0x09, 0x2d, 0xca, 0x2e, 0xb8, 0x63, 0xa1, 0x2e,
- 0x6c, 0xdc, 0x60, 0xc1, 0x28, 0x1b, 0x38, 0xb6, 0xc9, 0x20, 0x42, 0x70, 0xe1, 0x34, 0xd1, 0x36,
- 0x6c, 0x7a, 0x82, 0x46, 0xd4, 0xc3, 0x81, 0xd3, 0x32, 0x81, 0x0b, 0x1c, 0xe1, 0xc0, 0x69, 0x7f,
- 0x6a, 0xff, 0xfa, 0xfd, 0xe3, 0x6d, 0xc3, 0xfd, 0x6e, 0x01, 0xe0, 0x74, 0x96, 0xe5, 0xa1, 0x4e,
- 0xb4, 0x74, 0xa7, 0xab, 0xc8, 0x14, 0x8d, 0xd9, 0x25, 0xe3, 0x37, 0xcc, 0x69, 0x20, 0x04, 0xbd,
- 0xa1, 0x20, 0x43, 0xc1, 0xaf, 0x69, 0x48, 0x39, 0x23, 0x7e, 0xad, 0x4a, 0x18, 0x3e, 0x0f, 0x88,
- 0xef, 0xd8, 0x46, 0xca, 0xa7, 0x61, 0x8d, 0x9a, 0x68, 0x0f, 0xfe, 0xf3, 0xf9, 0x0d, 0x0b, 0x38,
- 0xf6, 0x29, 0x1b, 0x8c, 0xe8, 0x15, 0x1e, 0x10, 0xa7, 0x65, 0x5e, 0xf8, 0x24, 0x20, 0x11, 0xf1,
- 0x97, 0x3d, 0x94, 0x00, 0xbc, 0x90, 0x73, 0xa3, 0xf9, 0x58, 0xba, 0xb7, 0xab, 0x68, 0xbd, 0x85,
- 0x1e, 0x80, 0x4f, 0x43, 0x8f, 0x5f, 0x13, 0x51, 0xc9, 0xf7, 0x00, 0xb0, 0x17, 0xd1, 0x6b, 0x1c,
- 0xd5, 0xbe, 0xbb, 0xb0, 0x11, 0x91, 0xb0, 0x02, 0x4d, 0x04, 0xd0, 0xa9, 0x82, 0x46, 0x15, 0xa0,
- 0x73, 0x81, 0x69, 0xb0, 0x2a, 0x1a, 0xc1, 0x8e, 0xa7, 0xf2, 0x5c, 0x8e, 0xf5, 0x42, 0xf7, 0xf3,
- 0x5f, 0xc4, 0xba, 0xf4, 0x2e, 0x74, 0x63, 0x26, 0x08, 0xf6, 0xbe, 0x1a, 0x83, 0x8e, 0x85, 0x76,
- 0x60, 0x6b, 0x09, 0xed, 0x97, 0xaa, 0x3f, 0x2d, 0xd8, 0x31, 0xdd, 0x27, 0x66, 0x0f, 0x84, 0x2c,
- 0x0b, 0xf4, 0x05, 0x5a, 0x63, 0x95, 0xca, 0x6a, 0x01, 0x7a, 0x67, 0xef, 0x5e, 0xc6, 0xbc, 0x96,
- 0xb4, 0x8a, 0xf4, 0xe3, 0x3c, 0xf7, 0x54, 0x2a, 0x45, 0xf5, 0x0c, 0x1d, 0xc2, 0x6e, 0x92, 0xa6,
- 0x99, 0x89, 0x25, 0xd3, 0x51, 0x96, 0xdf, 0xab, 0x7d, 0xbb, 0x5a, 0xa5, 0xde, 0x92, 0xa6, 0xf9,
- 0xbd, 0x72, 0x6f, 0xe1, 0xff, 0x7f, 0x54, 0x31, 0x63, 0xe0, 0x43, 0x22, 0x70, 0x44, 0x39, 0x1b,
- 0x85, 0xb1, 0xe7, 0x91, 0x30, 0x74, 0x1a, 0xeb, 0xb4, 0xf9, 0x35, 0xb1, 0x30, 0xa6, 0x5e, 0xc3,
- 0xde, 0x92, 0x8e, 0x59, 0x18, 0x0f, 0x87, 0x5c, 0x98, 0x59, 0xbd, 0x18, 0x7c, 0xff, 0x06, 0xb6,
- 0x23, 0x59, 0xea, 0x2b, 0x95, 0xca, 0x4b, 0xf9, 0x5c, 0x9a, 0xa1, 0x27, 0x45, 0x36, 0xd2, 0xb2,
- 0xd4, 0x4e, 0xe3, 0xfc, 0xf8, 0xdb, 0x87, 0x49, 0xa6, 0x1f, 0x1e, 0xef, 0x8c, 0xcd, 0x53, 0x55,
- 0xc8, 0x7c, 0xac, 0xe6, 0xe9, 0x69, 0x7d, 0x29, 0xc7, 0x8b, 0x4b, 0x99, 0xa8, 0xc5, 0x41, 0xdd,
- 0x75, 0x2a, 0xe6, 0xe3, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x16, 0xe2, 0xd0, 0x6f, 0x03,
- 0x00, 0x00,
+ // 661 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x54, 0x4d, 0x4f, 0xdb, 0x4a,
+ 0x14, 0x8d, 0xf3, 0x05, 0xdc, 0x90, 0xe0, 0x37, 0xef, 0x21, 0x01, 0x7a, 0x95, 0x52, 0x4b, 0x08,
+ 0xda, 0x0a, 0xa2, 0xd2, 0x55, 0xab, 0x76, 0x61, 0xec, 0x21, 0x1d, 0x61, 0xc6, 0xd1, 0xd8, 0x01,
+ 0xa9, 0x0b, 0x22, 0x13, 0x0f, 0xc6, 0x6a, 0x32, 0x63, 0xc5, 0x06, 0x89, 0x65, 0xa5, 0xfe, 0xbd,
+ 0xfe, 0x85, 0xfe, 0x86, 0xae, 0xba, 0xae, 0xc6, 0x76, 0x48, 0x52, 0xb1, 0xf3, 0x39, 0x73, 0xae,
+ 0xcf, 0xbd, 0xe7, 0x8e, 0x06, 0xf6, 0x1e, 0xe4, 0x24, 0xbb, 0x0b, 0x46, 0xc9, 0x4c, 0x66, 0x32,
+ 0xed, 0x8d, 0xe5, 0x74, 0x2a, 0xc5, 0x71, 0x8e, 0x50, 0xb3, 0x40, 0x7b, 0xdd, 0x55, 0xcd, 0x63,
+ 0x20, 0xa2, 0x91, 0x4c, 0xb2, 0x58, 0x8a, 0xb4, 0x50, 0x1a, 0xff, 0x41, 0x95, 0xd8, 0xa8, 0x03,
+ 0xd5, 0x38, 0xdc, 0xd1, 0xba, 0xda, 0xe1, 0x06, 0xab, 0xc6, 0xa1, 0x71, 0x00, 0x35, 0x62, 0xa7,
+ 0xa8, 0x0b, 0x8d, 0x38, 0xe3, 0xd3, 0x74, 0x47, 0xeb, 0xd6, 0x0e, 0x5b, 0x27, 0x70, 0x5c, 0x9a,
+ 0x10, 0x9b, 0x15, 0x07, 0xc6, 0x18, 0xd6, 0x1d, 0x19, 0x39, 0xfc, 0x81, 0x4f, 0x8c, 0xc1, 0xe2,
+ 0x1b, 0x6d, 0x40, 0xc3, 0xc6, 0xa7, 0xc3, 0xbe, 0x5e, 0x41, 0xeb, 0x50, 0x27, 0xf4, 0xcc, 0xd5,
+ 0x35, 0xd4, 0x82, 0xb5, 0x2b, 0x93, 0x51, 0x42, 0xfb, 0x7a, 0x55, 0x29, 0x30, 0x63, 0x2e, 0xd3,
+ 0x6b, 0x68, 0x13, 0xd6, 0x2d, 0x46, 0x7c, 0x62, 0x99, 0x8e, 0x5e, 0x57, 0x07, 0x67, 0xa6, 0x6f,
+ 0x3a, 0x7a, 0xe3, 0x43, 0xe3, 0xd7, 0xef, 0x1f, 0x2f, 0x2a, 0xc6, 0x77, 0x0d, 0xd6, 0x1c, 0x19,
+ 0x45, 0xb1, 0x88, 0x50, 0x0f, 0x1a, 0x13, 0xe5, 0x90, 0x37, 0xdb, 0x39, 0xd9, 0x9d, 0xb7, 0x34,
+ 0x77, 0x7e, 0xfa, 0x60, 0x85, 0x0e, 0xbd, 0x84, 0xcd, 0x24, 0x18, 0x7f, 0x0d, 0x22, 0x3e, 0x12,
+ 0xc1, 0x94, 0xef, 0x54, 0xf3, 0x21, 0x5b, 0x25, 0x47, 0x83, 0x29, 0x47, 0xfb, 0xd0, 0x19, 0xcb,
+ 0x69, 0x22, 0x05, 0x17, 0x59, 0x21, 0xaa, 0xe5, 0xa2, 0xf6, 0x13, 0xab, 0x64, 0xc6, 0x7b, 0xd0,
+ 0xcb, 0x2e, 0xac, 0x39, 0xff, 0x4c, 0xa9, 0xf6, 0x5c, 0xe9, 0xdb, 0x3c, 0x1a, 0x55, 0x9a, 0xa2,
+ 0xfd, 0xd5, 0x50, 0xb7, 0x96, 0x26, 0x50, 0x82, 0x79, 0xb2, 0xdf, 0x34, 0x00, 0x33, 0x9c, 0xc6,
+ 0xc2, 0xcb, 0x82, 0x8c, 0x1b, 0x93, 0x65, 0xa4, 0x92, 0x1c, 0xd2, 0x73, 0xea, 0x5e, 0x51, 0xbd,
+ 0x82, 0x10, 0x74, 0x06, 0x0c, 0x0f, 0x98, 0x7b, 0x49, 0x3c, 0xe2, 0x52, 0x6c, 0x17, 0x51, 0x63,
+ 0x6a, 0x9e, 0x3a, 0xd8, 0xd6, 0xab, 0x2a, 0x5f, 0x9b, 0x78, 0x05, 0xaa, 0xa1, 0x6d, 0xf8, 0xc7,
+ 0x76, 0xaf, 0xa8, 0xe3, 0x9a, 0x36, 0xa1, 0xfd, 0x11, 0xb9, 0x30, 0xfb, 0x58, 0xaf, 0xab, 0x0a,
+ 0x1b, 0x3b, 0xd8, 0xc7, 0xf6, 0x22, 0xf8, 0x14, 0xc0, 0x4d, 0xf8, 0x4c, 0x79, 0xde, 0xa7, 0xc6,
+ 0xf5, 0x32, 0x5a, 0x6d, 0xa1, 0x03, 0x60, 0x13, 0xcf, 0x72, 0x2f, 0x31, 0xcb, 0xed, 0x3b, 0x00,
+ 0xa6, 0xe5, 0x93, 0x4b, 0xd3, 0x2f, 0x96, 0xdd, 0x82, 0x35, 0x1f, 0x7b, 0x39, 0xa8, 0x21, 0x80,
+ 0x66, 0x7e, 0xa8, 0x5c, 0x01, 0x9a, 0x67, 0x26, 0x71, 0x96, 0x4d, 0x7d, 0x68, 0x5b, 0x52, 0x08,
+ 0x3e, 0xce, 0x4a, 0xdf, 0x8f, 0x7f, 0x11, 0xab, 0xd6, 0x5b, 0xd0, 0x1a, 0x52, 0x86, 0x4d, 0xeb,
+ 0xb3, 0x1a, 0x50, 0xd7, 0x50, 0x1b, 0x36, 0x16, 0xb0, 0x3a, 0xff, 0xeb, 0x4f, 0x0d, 0xda, 0xaa,
+ 0xfb, 0x40, 0x5d, 0x7e, 0xc6, 0xd3, 0x04, 0x7d, 0x82, 0xfa, 0x58, 0x86, 0xbc, 0xbc, 0x48, 0xaf,
+ 0xe6, 0x6b, 0x58, 0x11, 0x2d, 0xa3, 0xec, 0x7e, 0x26, 0x2c, 0x19, 0x72, 0x96, 0x97, 0xa1, 0x03,
+ 0xd8, 0x0a, 0xc2, 0x30, 0x56, 0x67, 0xc1, 0x64, 0x14, 0x8b, 0x5b, 0x59, 0x5e, 0xad, 0xce, 0x82,
+ 0x26, 0xe2, 0x56, 0x1a, 0xd7, 0xf0, 0xef, 0x33, 0x7f, 0x51, 0x6b, 0x70, 0x07, 0x98, 0x99, 0x3e,
+ 0x71, 0xe9, 0xc8, 0x1b, 0x5a, 0x16, 0xf6, 0x3c, 0xbd, 0xb2, 0x4a, 0xab, 0x68, 0x86, 0x4c, 0x0d,
+ 0xb5, 0x0b, 0xdb, 0x0b, 0x7a, 0x48, 0xbd, 0xe1, 0x60, 0xe0, 0x32, 0xb5, 0xab, 0xf9, 0x80, 0xaf,
+ 0xff, 0x87, 0x4d, 0x9f, 0xa7, 0xd9, 0x85, 0x0c, 0xf9, 0x39, 0x7f, 0x4c, 0xd5, 0xd2, 0x83, 0x24,
+ 0x1e, 0x65, 0x3c, 0xcd, 0xf4, 0xca, 0xe9, 0xd1, 0x97, 0x37, 0x51, 0x9c, 0xdd, 0xdd, 0xdf, 0xa8,
+ 0x31, 0x7b, 0x32, 0xe1, 0x62, 0x2c, 0x67, 0x61, 0xaf, 0x78, 0x1e, 0x8e, 0xca, 0xe7, 0x21, 0x92,
+ 0xe5, 0x2b, 0x72, 0xd3, 0xcc, 0x99, 0x77, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xd4, 0xbf,
+ 0xf3, 0x64, 0x04, 0x00, 0x00,
}
diff --git a/vendor/github.com/opencord/voltha-protos/go/inter_container/inter_container.pb.go b/vendor/github.com/opencord/voltha-protos/go/inter_container/inter_container.pb.go
index 15ef51b..650452a 100644
--- a/vendor/github.com/opencord/voltha-protos/go/inter_container/inter_container.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/go/inter_container/inter_container.pb.go
@@ -33,6 +33,15 @@
// LogLevel from public import voltha_protos/common.proto
type LogLevel = common.LogLevel
+// Logging from public import voltha_protos/common.proto
+type Logging = common.Logging
+
+// LoggingComponent from public import voltha_protos/common.proto
+type LoggingComponent = common.LoggingComponent
+
+// Loggings from public import voltha_protos/common.proto
+type Loggings = common.Loggings
+
// AdminState from public import voltha_protos/common.proto
type AdminState = common.AdminState
@@ -130,9 +139,6 @@
// AlarmFilters from public import voltha_protos/voltha.proto
type AlarmFilters = voltha.AlarmFilters
-// Logging from public import voltha_protos/voltha.proto
-type Logging = voltha.Logging
-
// CoreInstance from public import voltha_protos/voltha.proto
type CoreInstance = voltha.CoreInstance
diff --git a/vendor/github.com/opencord/voltha-protos/go/voltha/voltha.pb.go b/vendor/github.com/opencord/voltha-protos/go/voltha/voltha.pb.go
index 9d1498c..17eaecf 100644
--- a/vendor/github.com/opencord/voltha-protos/go/voltha/voltha.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/go/voltha/voltha.pb.go
@@ -53,6 +53,15 @@
// LogLevel from public import voltha_protos/common.proto
type LogLevel = common.LogLevel
+// Logging from public import voltha_protos/common.proto
+type Logging = common.Logging
+
+// LoggingComponent from public import voltha_protos/common.proto
+type LoggingComponent = common.LoggingComponent
+
+// Loggings from public import voltha_protos/common.proto
+type Loggings = common.Loggings
+
// AdminState from public import voltha_protos/common.proto
type AdminState = common.AdminState
@@ -1326,7 +1335,7 @@
}
func (SelfTestResponse_SelfTestResult) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{10, 0}
+ return fileDescriptor_e084f1a60ce7016c, []int{9, 0}
}
type DeviceGroup struct {
@@ -1587,53 +1596,6 @@
return nil
}
-type Logging struct {
- Level common.LogLevel_LogLevel `protobuf:"varint,1,opt,name=level,proto3,enum=common.LogLevel_LogLevel" json:"level,omitempty"`
- PackageName string `protobuf:"bytes,2,opt,name=package_name,json=packageName,proto3" json:"package_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Logging) Reset() { *m = Logging{} }
-func (m *Logging) String() string { return proto.CompactTextString(m) }
-func (*Logging) ProtoMessage() {}
-func (*Logging) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{6}
-}
-
-func (m *Logging) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Logging.Unmarshal(m, b)
-}
-func (m *Logging) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Logging.Marshal(b, m, deterministic)
-}
-func (m *Logging) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Logging.Merge(m, src)
-}
-func (m *Logging) XXX_Size() int {
- return xxx_messageInfo_Logging.Size(m)
-}
-func (m *Logging) XXX_DiscardUnknown() {
- xxx_messageInfo_Logging.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Logging proto.InternalMessageInfo
-
-func (m *Logging) GetLevel() common.LogLevel_LogLevel {
- if m != nil {
- return m.Level
- }
- return common.LogLevel_DEBUG
-}
-
-func (m *Logging) GetPackageName() string {
- if m != nil {
- return m.PackageName
- }
- return ""
-}
-
// CoreInstance represents a core instance. It is data held in memory when a core
// is running. This data is not persistent.
type CoreInstance struct {
@@ -1648,7 +1610,7 @@
func (m *CoreInstance) String() string { return proto.CompactTextString(m) }
func (*CoreInstance) ProtoMessage() {}
func (*CoreInstance) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{7}
+ return fileDescriptor_e084f1a60ce7016c, []int{6}
}
func (m *CoreInstance) XXX_Unmarshal(b []byte) error {
@@ -1694,7 +1656,7 @@
func (m *CoreInstances) String() string { return proto.CompactTextString(m) }
func (*CoreInstances) ProtoMessage() {}
func (*CoreInstances) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{8}
+ return fileDescriptor_e084f1a60ce7016c, []int{7}
}
func (m *CoreInstances) XXX_Unmarshal(b []byte) error {
@@ -1744,7 +1706,7 @@
func (m *Voltha) String() string { return proto.CompactTextString(m) }
func (*Voltha) ProtoMessage() {}
func (*Voltha) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{9}
+ return fileDescriptor_e084f1a60ce7016c, []int{8}
}
func (m *Voltha) XXX_Unmarshal(b []byte) error {
@@ -1840,7 +1802,7 @@
func (m *SelfTestResponse) String() string { return proto.CompactTextString(m) }
func (*SelfTestResponse) ProtoMessage() {}
func (*SelfTestResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{10}
+ return fileDescriptor_e084f1a60ce7016c, []int{9}
}
func (m *SelfTestResponse) XXX_Unmarshal(b []byte) error {
@@ -1882,7 +1844,7 @@
func (m *OfAgentSubscriber) String() string { return proto.CompactTextString(m) }
func (*OfAgentSubscriber) ProtoMessage() {}
func (*OfAgentSubscriber) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{11}
+ return fileDescriptor_e084f1a60ce7016c, []int{10}
}
func (m *OfAgentSubscriber) XXX_Unmarshal(b []byte) error {
@@ -1932,7 +1894,7 @@
func (m *Membership) String() string { return proto.CompactTextString(m) }
func (*Membership) ProtoMessage() {}
func (*Membership) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{12}
+ return fileDescriptor_e084f1a60ce7016c, []int{11}
}
func (m *Membership) XXX_Unmarshal(b []byte) error {
@@ -1980,7 +1942,7 @@
func (m *FlowMetadata) String() string { return proto.CompactTextString(m) }
func (*FlowMetadata) ProtoMessage() {}
func (*FlowMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_e084f1a60ce7016c, []int{13}
+ return fileDescriptor_e084f1a60ce7016c, []int{12}
}
func (m *FlowMetadata) XXX_Unmarshal(b []byte) error {
@@ -2017,7 +1979,6 @@
proto.RegisterType((*AlarmFilterRule)(nil), "voltha.AlarmFilterRule")
proto.RegisterType((*AlarmFilter)(nil), "voltha.AlarmFilter")
proto.RegisterType((*AlarmFilters)(nil), "voltha.AlarmFilters")
- proto.RegisterType((*Logging)(nil), "voltha.Logging")
proto.RegisterType((*CoreInstance)(nil), "voltha.CoreInstance")
proto.RegisterType((*CoreInstances)(nil), "voltha.CoreInstances")
proto.RegisterType((*Voltha)(nil), "voltha.Voltha")
@@ -2030,164 +1991,163 @@
func init() { proto.RegisterFile("voltha_protos/voltha.proto", fileDescriptor_e084f1a60ce7016c) }
var fileDescriptor_e084f1a60ce7016c = []byte{
- // 2505 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0x5b, 0x73, 0xdb, 0xc6,
- 0xf5, 0x17, 0x75, 0xd7, 0x21, 0x29, 0x92, 0x47, 0x37, 0x9a, 0x92, 0x62, 0x69, 0x13, 0x5f, 0xfe,
- 0x4a, 0x44, 0xc6, 0x56, 0xec, 0xf9, 0xd7, 0x69, 0x26, 0xb5, 0x2e, 0x56, 0x59, 0xcb, 0x12, 0x0b,
- 0x5a, 0x76, 0xdb, 0xc4, 0xc3, 0x01, 0x89, 0x15, 0x85, 0x31, 0x08, 0xb0, 0x58, 0x50, 0xae, 0xc6,
- 0xcd, 0xb4, 0x93, 0x5e, 0xa7, 0x8f, 0xcd, 0x57, 0xe8, 0x53, 0xa7, 0xfd, 0x28, 0x7e, 0xea, 0x17,
- 0xe8, 0x74, 0xfa, 0xd0, 0xc7, 0x3e, 0xb9, 0x7d, 0xec, 0xec, 0x05, 0x14, 0x40, 0x00, 0xba, 0xa4,
- 0x99, 0xe9, 0x93, 0x88, 0x3d, 0x67, 0x7f, 0xbf, 0xdf, 0x9e, 0xdd, 0x3d, 0x7b, 0xb0, 0x10, 0x94,
- 0x4e, 0x1c, 0xcb, 0x3b, 0xd6, 0x1b, 0x5d, 0xd7, 0xf1, 0x1c, 0x56, 0x91, 0x4f, 0x65, 0xf1, 0x84,
- 0xe3, 0xf2, 0xa9, 0xb4, 0xd4, 0x76, 0x9c, 0xb6, 0x45, 0x2b, 0x7a, 0xd7, 0xac, 0xe8, 0xb6, 0xed,
- 0x78, 0xba, 0x67, 0x3a, 0x36, 0x93, 0x5e, 0xa5, 0x45, 0x65, 0x15, 0x4f, 0xcd, 0xde, 0x51, 0x85,
- 0x76, 0xba, 0xde, 0xa9, 0x32, 0x16, 0xc3, 0xf0, 0x1d, 0xea, 0x29, 0xf0, 0xd2, 0x00, 0x71, 0xcb,
- 0xe9, 0x74, 0x1c, 0x3b, 0xde, 0x76, 0x4c, 0x75, 0xcb, 0x3b, 0x56, 0x36, 0x12, 0xb6, 0x59, 0x4e,
- 0xdb, 0x6c, 0xe9, 0x56, 0xc3, 0xa0, 0x27, 0x66, 0x8b, 0xc6, 0xf7, 0x0f, 0xd9, 0x16, 0xc3, 0x36,
- 0xdd, 0xd0, 0xbb, 0x1e, 0x75, 0x95, 0xf1, 0x7a, 0xd8, 0xe8, 0x74, 0xa9, 0x7d, 0x64, 0x39, 0xaf,
- 0x1a, 0x77, 0x36, 0x12, 0x1c, 0x3a, 0x2d, 0xb3, 0xd1, 0x31, 0x9b, 0x0d, 0xa3, 0xa9, 0x1c, 0x56,
- 0x63, 0x1c, 0x74, 0x4b, 0x77, 0x3b, 0x67, 0x2e, 0x2b, 0x61, 0x97, 0x53, 0xdd, 0x6e, 0x37, 0x9c,
- 0x6e, 0x20, 0xa4, 0xe4, 0x0f, 0x29, 0x48, 0x6f, 0x0b, 0xd1, 0xbb, 0xae, 0xd3, 0xeb, 0xe2, 0x1c,
- 0x0c, 0x9b, 0x46, 0x31, 0xb5, 0x92, 0xba, 0x3d, 0xb5, 0x39, 0xf6, 0x8f, 0xb7, 0x6f, 0x96, 0x53,
- 0xda, 0xb0, 0x69, 0x60, 0x15, 0x72, 0xe1, 0xe1, 0xb3, 0xe2, 0xf0, 0xca, 0xc8, 0xed, 0xf4, 0xdd,
- 0xb9, 0xb2, 0x9a, 0xc7, 0x3d, 0x69, 0x96, 0x58, 0x9b, 0x53, 0x7f, 0x7b, 0xfb, 0x66, 0x79, 0x94,
- 0x63, 0x69, 0xd3, 0x56, 0xd0, 0xc2, 0x70, 0x03, 0x26, 0x7c, 0x88, 0x11, 0x01, 0x31, 0xed, 0x43,
- 0x44, 0xfb, 0xfa, 0x9e, 0xe4, 0x5b, 0x90, 0x09, 0xa8, 0x64, 0xf8, 0x7f, 0x30, 0x66, 0x7a, 0xb4,
- 0xc3, 0x8a, 0x29, 0x01, 0x31, 0x13, 0x86, 0x10, 0x4e, 0x9a, 0xf4, 0x20, 0x3f, 0x03, 0x7c, 0xc8,
- 0xa3, 0xf2, 0xc8, 0xb4, 0x3c, 0xea, 0x6a, 0x3d, 0x8b, 0x3e, 0xa6, 0xa7, 0xa4, 0x19, 0xd7, 0x8a,
- 0xe3, 0x9c, 0x35, 0x3f, 0x84, 0x93, 0x30, 0xea, 0x9d, 0x76, 0x69, 0x3e, 0x85, 0x19, 0x98, 0x64,
- 0xf4, 0x84, 0xba, 0xa6, 0x77, 0x9a, 0x1f, 0xc6, 0x1c, 0xa4, 0x5d, 0xca, 0x9c, 0x9e, 0xdb, 0xa2,
- 0x0d, 0xd3, 0xc8, 0x8f, 0x70, 0x73, 0x4b, 0xf7, 0x68, 0xdb, 0x71, 0x4f, 0xf3, 0xa3, 0x98, 0x85,
- 0x29, 0x29, 0x98, 0x1b, 0xc7, 0x1e, 0x8c, 0xfd, 0xf3, 0xed, 0x9b, 0xe5, 0x21, 0x72, 0x0c, 0xb9,
- 0x01, 0x2a, 0xfc, 0x14, 0x46, 0x5e, 0xd2, 0x53, 0x11, 0xe6, 0xe9, 0xbb, 0xeb, 0xbe, 0xf8, 0xa8,
- 0xa0, 0x98, 0x26, 0x8d, 0xf7, 0xc4, 0x59, 0x18, 0x3b, 0xd1, 0xad, 0x1e, 0x2d, 0x0e, 0xf3, 0x99,
- 0xd2, 0xe4, 0x03, 0xa9, 0x43, 0x3a, 0xd0, 0x21, 0x69, 0x2e, 0xd7, 0x61, 0xcc, 0xed, 0x59, 0xfd,
- 0x19, 0x5c, 0x48, 0xa0, 0xd7, 0xa4, 0x17, 0xf9, 0x04, 0x32, 0x01, 0x0b, 0xc3, 0x75, 0x98, 0x38,
- 0x92, 0x3f, 0x07, 0x83, 0x1f, 0x04, 0xf0, 0x7d, 0xc8, 0x0b, 0x98, 0xd8, 0x73, 0xda, 0x6d, 0xd3,
- 0x6e, 0x63, 0x05, 0xc6, 0x2c, 0x7a, 0x42, 0x2d, 0x35, 0xee, 0x6b, 0x65, 0xb5, 0x13, 0xf7, 0x9c,
- 0xf6, 0x1e, 0x6f, 0xef, 0xff, 0xd0, 0xa4, 0x1f, 0xae, 0x42, 0xa6, 0xab, 0xb7, 0x5e, 0xea, 0x6d,
- 0xda, 0xb0, 0xf5, 0x8e, 0x3f, 0xd8, 0xb4, 0x6a, 0xdb, 0xd7, 0x3b, 0x94, 0xb8, 0x90, 0xd9, 0x72,
- 0x5c, 0x5a, 0xb5, 0x99, 0xa7, 0xdb, 0x2d, 0x8a, 0x37, 0x21, 0x6d, 0xaa, 0xdf, 0x8d, 0xc1, 0xc1,
- 0x83, 0x6f, 0xa9, 0x1a, 0xb8, 0x01, 0xe3, 0x72, 0xaf, 0x0b, 0xd0, 0xf4, 0xdd, 0x59, 0x7f, 0x10,
- 0xdf, 0x15, 0xad, 0x75, 0x4f, 0xf7, 0x7a, 0x6c, 0x73, 0x8c, 0x2f, 0xc5, 0x21, 0x4d, 0xb9, 0x3e,
- 0x18, 0xfb, 0x37, 0xc7, 0x21, 0x9b, 0x90, 0x0d, 0x72, 0x32, 0x5c, 0x0b, 0xaf, 0xc6, 0x3e, 0x56,
- 0xd0, 0x4b, 0x2d, 0x47, 0x1f, 0xe3, 0xaf, 0xa3, 0x30, 0xfe, 0x4c, 0x78, 0xe1, 0x75, 0x98, 0x38,
- 0xa1, 0x2e, 0x33, 0x1d, 0x3b, 0x2c, 0xd7, 0x6f, 0xc5, 0xfb, 0x30, 0xa9, 0x72, 0x87, 0x3f, 0x67,
- 0xb9, 0x7e, 0xc8, 0x65, 0x7b, 0x70, 0xcf, 0xf4, 0x7d, 0xe3, 0x36, 0xed, 0xc8, 0x7f, 0xbf, 0x69,
- 0x47, 0x2f, 0xbb, 0x69, 0xf1, 0x3b, 0x90, 0x51, 0xdb, 0x81, 0x6f, 0x26, 0x56, 0x1c, 0x13, 0x3d,
- 0x31, 0xdc, 0xf3, 0xe9, 0x69, 0x37, 0xd4, 0x3b, 0x6d, 0xf4, 0x9b, 0x19, 0x6e, 0x41, 0x56, 0x21,
- 0xb4, 0xc5, 0xbe, 0x2f, 0x8e, 0x27, 0x6e, 0xf7, 0x20, 0x86, 0xa2, 0x55, 0xb9, 0x62, 0x0b, 0xb2,
- 0x32, 0x2d, 0xfa, 0xcb, 0x76, 0x22, 0x71, 0xd9, 0x86, 0x40, 0xf4, 0xe0, 0xaa, 0xff, 0x3e, 0x14,
- 0xce, 0x32, 0xb0, 0xee, 0xe9, 0x4d, 0x9d, 0xd1, 0xe2, 0x92, 0x02, 0xe2, 0x96, 0xf2, 0x13, 0xb3,
- 0x29, 0xe5, 0x6c, 0xeb, 0x9e, 0xbe, 0x99, 0xe7, 0x40, 0xe9, 0x40, 0x3e, 0xd0, 0x72, 0xdc, 0x8b,
- 0x3b, 0xa9, 0xde, 0xf8, 0x1c, 0x66, 0x82, 0x39, 0xdb, 0x07, 0x5d, 0x56, 0x53, 0x24, 0x40, 0x85,
- 0xb6, 0x73, 0x61, 0x85, 0x2c, 0xe9, 0xa6, 0x10, 0xfc, 0x25, 0xf6, 0xa7, 0x14, 0xe4, 0xeb, 0xd4,
- 0x3a, 0x7a, 0x4a, 0x99, 0xa7, 0x51, 0xd6, 0x75, 0x6c, 0xc6, 0x33, 0xcf, 0xb8, 0x4b, 0x59, 0xcf,
- 0xf2, 0xd4, 0x26, 0xbc, 0xe5, 0x47, 0x61, 0xd0, 0x33, 0xd8, 0xd0, 0xb3, 0x3c, 0x4d, 0x75, 0x23,
- 0x35, 0x98, 0x0e, 0x5b, 0x30, 0x0d, 0x13, 0xf5, 0xc3, 0xad, 0xad, 0x9d, 0x7a, 0x3d, 0x3f, 0xc4,
- 0x1f, 0x1e, 0x3d, 0xac, 0xee, 0x1d, 0x6a, 0x3b, 0xf9, 0x14, 0x16, 0x20, 0xbb, 0x7f, 0xf0, 0xb4,
- 0x51, 0x3f, 0xac, 0xd5, 0x0e, 0xb4, 0xa7, 0x3b, 0xdb, 0xf9, 0x61, 0xde, 0x74, 0xb8, 0xff, 0x78,
- 0xff, 0xe0, 0xf9, 0x7e, 0x63, 0x47, 0xd3, 0x0e, 0xb4, 0xfc, 0x88, 0x9f, 0x26, 0x0f, 0xa0, 0x70,
- 0x70, 0xf4, 0xb0, 0x4d, 0x6d, 0xaf, 0xde, 0x6b, 0xb2, 0x96, 0x6b, 0x36, 0xa9, 0x8b, 0xcb, 0x00,
- 0xce, 0x91, 0xce, 0x1b, 0xfb, 0xbb, 0x59, 0x9b, 0x52, 0x2d, 0x55, 0x03, 0x17, 0x61, 0x4a, 0x9d,
- 0x70, 0xa6, 0xa1, 0xb2, 0xc3, 0xa4, 0x6c, 0xa8, 0x1a, 0xe4, 0x63, 0x80, 0x27, 0xb4, 0xd3, 0xa4,
- 0x2e, 0x3b, 0x36, 0xbb, 0x1c, 0x49, 0xac, 0x21, 0x99, 0x49, 0x14, 0x92, 0x68, 0xe1, 0x79, 0x04,
- 0xa7, 0x45, 0xae, 0x94, 0x10, 0xc3, 0xa6, 0x41, 0x76, 0x20, 0xf3, 0xc8, 0x72, 0x5e, 0x3d, 0xa1,
- 0x9e, 0xce, 0x67, 0x06, 0xef, 0xc1, 0x78, 0x87, 0x06, 0x92, 0xde, 0x72, 0x39, 0x78, 0x62, 0x3b,
- 0x47, 0xdd, 0x86, 0x30, 0x37, 0x5a, 0x8e, 0x7d, 0x64, 0xb6, 0x35, 0xe5, 0x7c, 0xf7, 0xcf, 0x15,
- 0xc8, 0xca, 0x6d, 0x5e, 0xa7, 0x2e, 0x9f, 0x32, 0x3c, 0x80, 0xe9, 0xc3, 0xae, 0xa1, 0x7b, 0xd4,
- 0x4f, 0x76, 0x98, 0x0b, 0xec, 0x46, 0x9e, 0x27, 0x4b, 0xf3, 0x65, 0x59, 0xe7, 0x94, 0xfd, 0x3a,
- 0xa7, 0xbc, 0xc3, 0xeb, 0x1c, 0x32, 0xfb, 0xe5, 0x5f, 0xfe, 0xfe, 0xd5, 0xf0, 0x34, 0x66, 0x44,
- 0x79, 0x74, 0x72, 0x87, 0x57, 0x24, 0x0c, 0x9f, 0x43, 0x76, 0x97, 0x7a, 0x81, 0x91, 0x26, 0x74,
- 0x2f, 0xf5, 0x37, 0xde, 0x99, 0x2f, 0x29, 0x09, 0xc8, 0x59, 0x44, 0x1f, 0xb2, 0x73, 0x86, 0xf3,
- 0x02, 0xf2, 0x52, 0x69, 0x00, 0x3b, 0x06, 0x23, 0x51, 0xee, 0xb2, 0xc0, 0x5e, 0x20, 0x31, 0xd8,
- 0x0f, 0x52, 0x6b, 0xb8, 0x0d, 0x53, 0xbb, 0xd4, 0x53, 0x39, 0x30, 0x49, 0x73, 0x3f, 0xcd, 0x48,
- 0x3f, 0x92, 0x13, 0x98, 0x53, 0x38, 0xa1, 0x30, 0xb1, 0x07, 0x85, 0x3d, 0x93, 0x79, 0xe1, 0x7c,
- 0x9c, 0x84, 0x36, 0x17, 0x97, 0x98, 0x19, 0xb9, 0xf3, 0xbb, 0x7f, 0xbd, 0x59, 0x9e, 0x50, 0x39,
- 0x5c, 0xfc, 0x46, 0xf9, 0x5b, 0x90, 0xcd, 0x60, 0xc1, 0x1f, 0x80, 0xd9, 0x67, 0xa8, 0x43, 0x6e,
- 0x97, 0x86, 0x58, 0x11, 0xfc, 0xe3, 0xac, 0xba, 0x5d, 0x8a, 0x3d, 0x01, 0xc8, 0x3b, 0x02, 0xaf,
- 0x88, 0xf3, 0x11, 0xbc, 0xca, 0x6b, 0xd3, 0xf8, 0x02, 0x75, 0xc8, 0xf0, 0xb1, 0x3c, 0xf4, 0xf3,
- 0x77, 0xd2, 0x30, 0xf2, 0x03, 0xd9, 0x9f, 0x91, 0x5b, 0x5c, 0x35, 0x9c, 0x1d, 0x13, 0x82, 0x08,
- 0x31, 0xef, 0x13, 0xf5, 0x8f, 0x84, 0xd7, 0x80, 0x9c, 0x62, 0x2f, 0x9c, 0xdd, 0x93, 0x88, 0xe6,
- 0x63, 0xcf, 0x09, 0x46, 0xee, 0x71, 0xba, 0x42, 0xe4, 0x74, 0x11, 0xac, 0xd7, 0x70, 0x21, 0xb0,
- 0x3c, 0x83, 0x66, 0xfc, 0x0c, 0xf2, 0xbb, 0x34, 0xcc, 0x1d, 0x8a, 0x5a, 0xfc, 0xb1, 0x44, 0xde,
- 0x13, 0xb8, 0xef, 0xe0, 0x52, 0x02, 0xae, 0x0c, 0x9e, 0x0b, 0xf3, 0x91, 0x91, 0xd5, 0x1c, 0xd7,
- 0x63, 0xf1, 0x13, 0xa3, 0xfc, 0x84, 0x07, 0xb9, 0xaf, 0x16, 0x40, 0x97, 0x3f, 0x09, 0xb6, 0xf7,
- 0x90, 0x9c, 0xc7, 0x56, 0x11, 0x9e, 0xf8, 0x8b, 0x14, 0xcc, 0x0e, 0x8e, 0x88, 0x23, 0xe2, 0x5c,
- 0x0c, 0x4d, 0xd5, 0x28, 0xcd, 0xc4, 0x34, 0x93, 0x4f, 0x39, 0xf9, 0x38, 0x8c, 0x72, 0x48, 0xc1,
- 0x5d, 0xc6, 0x0f, 0x2e, 0xe6, 0xae, 0xbc, 0xe6, 0x7f, 0x1a, 0x7c, 0xe4, 0xbf, 0x4a, 0xc1, 0xc2,
- 0x8e, 0xad, 0x37, 0x2d, 0x7a, 0x69, 0x21, 0x49, 0x5b, 0xf6, 0x63, 0x21, 0xe0, 0x1e, 0xd9, 0xb8,
- 0x8a, 0x80, 0x0a, 0x15, 0xe4, 0xf8, 0x9b, 0x14, 0x14, 0xb7, 0x4d, 0xf6, 0x8d, 0x08, 0xf9, 0xb6,
- 0x10, 0x72, 0x9f, 0x7c, 0x74, 0x25, 0x21, 0x86, 0x64, 0xc7, 0x9f, 0xc6, 0xac, 0x05, 0x9e, 0xcd,
- 0xc3, 0x6b, 0x01, 0x43, 0x29, 0x5c, 0xd8, 0xc9, 0xa6, 0x5a, 0x09, 0xbc, 0x35, 0x9a, 0x0a, 0x2e,
- 0x5a, 0x15, 0xa2, 0x17, 0x5f, 0x15, 0x4b, 0xfd, 0x14, 0x1f, 0x16, 0xf0, 0x54, 0xc8, 0x5b, 0x8a,
- 0x10, 0x8b, 0x76, 0xd9, 0x27, 0x31, 0x24, 0xeb, 0x42, 0xc2, 0x2d, 0x72, 0x09, 0x09, 0x3c, 0xbd,
- 0xfe, 0x32, 0x05, 0xcb, 0x31, 0x2a, 0x9e, 0xf0, 0x73, 0x49, 0xca, 0x58, 0x0c, 0xc9, 0x10, 0x86,
- 0x27, 0x8e, 0x71, 0x81, 0x8a, 0xb2, 0x50, 0x71, 0x9b, 0xbc, 0x7b, 0xae, 0x0a, 0x79, 0xfa, 0x71,
- 0x19, 0x3f, 0x4f, 0xc1, 0x42, 0x64, 0x2e, 0x04, 0x57, 0x78, 0x32, 0x66, 0xa2, 0x62, 0x18, 0xd9,
- 0xe6, 0x33, 0x30, 0xe9, 0x9f, 0xbc, 0x91, 0xe9, 0xb8, 0x81, 0x97, 0x51, 0x81, 0xbf, 0x4f, 0xc1,
- 0x62, 0xec, 0x72, 0x50, 0xf5, 0x61, 0x50, 0xc6, 0x42, 0x64, 0x6a, 0xa4, 0x13, 0xd9, 0xe7, 0xec,
- 0x59, 0x48, 0x0b, 0x93, 0x2c, 0x46, 0x23, 0x7a, 0xd6, 0xf0, 0xf6, 0x85, 0x73, 0xa3, 0xfa, 0xe2,
- 0x57, 0x29, 0x58, 0x4d, 0x58, 0x24, 0x82, 0x51, 0x4e, 0xd1, 0x6a, 0xbc, 0x9c, 0xcb, 0x2c, 0x97,
- 0x0d, 0x21, 0x69, 0x9d, 0x5c, 0x5a, 0x12, 0x9f, 0xad, 0x17, 0x90, 0xe6, 0x91, 0xba, 0xe8, 0x5c,
- 0xc8, 0x85, 0xeb, 0x6f, 0x46, 0x6e, 0xf0, 0x58, 0x4c, 0xf5, 0xdf, 0x11, 0x04, 0x75, 0x01, 0x73,
- 0x3e, 0xb5, 0x7f, 0x00, 0x18, 0x90, 0x3d, 0x83, 0xaf, 0x1a, 0xc9, 0x04, 0xe9, 0xb3, 0x29, 0x61,
- 0xa4, 0xcc, 0xc1, 0x45, 0x4d, 0x7e, 0xde, 0xd9, 0x2c, 0x39, 0x4c, 0x83, 0xe1, 0x21, 0xe4, 0x35,
- 0xda, 0x72, 0xec, 0x96, 0x69, 0x51, 0x7f, 0x24, 0x41, 0xc0, 0xc4, 0x90, 0x2d, 0x09, 0xcc, 0x79,
- 0x12, 0xc5, 0xe4, 0xb1, 0xd9, 0x11, 0xf5, 0x4a, 0xcc, 0xb1, 0x35, 0xf0, 0x2a, 0xe4, 0xc3, 0xe0,
- 0xec, 0xc0, 0xf0, 0xe5, 0x39, 0xf5, 0x3d, 0xc8, 0x6c, 0xb9, 0x54, 0xf7, 0x94, 0x34, 0x1c, 0xe8,
- 0x1d, 0x41, 0x53, 0x15, 0x1a, 0x19, 0x0c, 0x26, 0x97, 0xf4, 0x1c, 0x32, 0x32, 0xf1, 0xc7, 0xa8,
- 0x4a, 0x1a, 0xe4, 0xbb, 0x02, 0x6f, 0x99, 0x2c, 0xc6, 0xa9, 0xf3, 0x53, 0xf9, 0x0f, 0x21, 0xab,
- 0x32, 0xf9, 0x15, 0x90, 0xd5, 0x39, 0x4d, 0x96, 0x62, 0x91, 0xfd, 0xdc, 0xfc, 0x1c, 0x32, 0x1a,
- 0x6d, 0x3a, 0x8e, 0xf7, 0x8d, 0x69, 0x76, 0x05, 0x1c, 0x07, 0xde, 0xa6, 0x16, 0xf5, 0xbe, 0x46,
- 0x30, 0xd6, 0xe2, 0x81, 0x0d, 0x01, 0x87, 0x3d, 0xc8, 0x6e, 0x3b, 0xaf, 0x6c, 0xcb, 0xd1, 0x8d,
- 0x6a, 0x47, 0x6f, 0xd3, 0xb3, 0xb3, 0x4c, 0x3c, 0xfa, 0xb6, 0xd2, 0x9c, 0x4f, 0x78, 0xd0, 0xa5,
- 0xae, 0xb8, 0xb7, 0xe4, 0xef, 0x52, 0xe4, 0xbe, 0xe0, 0xf8, 0x90, 0xbc, 0x1f, 0xcb, 0x61, 0x72,
- 0x88, 0x86, 0xa1, 0x30, 0x58, 0xe5, 0x35, 0x7f, 0x3d, 0xf9, 0x82, 0x4f, 0xee, 0x97, 0x29, 0x98,
- 0xdf, 0xa5, 0x5e, 0x88, 0x43, 0x5e, 0x4b, 0x24, 0x0b, 0x88, 0x6b, 0x26, 0x0f, 0x84, 0x80, 0x8f,
- 0xf0, 0xee, 0x15, 0x04, 0x54, 0x98, 0x64, 0xea, 0x89, 0x92, 0x2d, 0x84, 0x77, 0x45, 0x76, 0x95,
- 0x87, 0xf0, 0x2a, 0xc3, 0xc7, 0x23, 0x59, 0xa6, 0x86, 0x90, 0xd8, 0xc0, 0x8c, 0xc6, 0xb1, 0x31,
- 0xf2, 0x81, 0xa0, 0xbb, 0x89, 0xef, 0x5d, 0x86, 0x0e, 0x7f, 0x02, 0x33, 0x5b, 0xbc, 0x00, 0xb7,
- 0x2e, 0x39, 0xc2, 0xd8, 0x09, 0x56, 0x23, 0x5c, 0xbb, 0xd2, 0x08, 0x7f, 0x9b, 0x82, 0x99, 0x87,
- 0x2d, 0xcf, 0x3c, 0xd1, 0x3d, 0x2a, 0x58, 0x64, 0x3a, 0xbf, 0x22, 0xf5, 0x96, 0xa0, 0xfe, 0x84,
- 0xfc, 0xff, 0x55, 0xa6, 0x56, 0x36, 0xf7, 0x04, 0x1f, 0x5f, 0x68, 0xbf, 0x4e, 0x41, 0x41, 0xa3,
- 0x27, 0xd4, 0xf5, 0xfe, 0x27, 0x42, 0x5c, 0x41, 0xcd, 0x85, 0x7c, 0x0e, 0xb9, 0xb3, 0xe3, 0x21,
- 0x5a, 0xbb, 0x67, 0x7d, 0x45, 0xb2, 0x68, 0x2f, 0x47, 0x8a, 0xf6, 0x25, 0x2c, 0xc5, 0xd2, 0xcb,
- 0x62, 0xfd, 0x05, 0xcc, 0x04, 0xd0, 0x3b, 0x5b, 0xe2, 0x45, 0x3d, 0xcc, 0x50, 0xe8, 0x33, 0xf8,
- 0x66, 0x72, 0x4b, 0x20, 0xaf, 0xe2, 0xf5, 0x78, 0xe4, 0x8e, 0x7a, 0xe1, 0x67, 0x68, 0xc3, 0x9c,
- 0x8c, 0xdc, 0x20, 0x41, 0x14, 0x34, 0x31, 0x1d, 0xad, 0xc9, 0x2a, 0x93, 0x5c, 0x44, 0xc6, 0x83,
- 0xd5, 0x09, 0x06, 0xeb, 0x72, 0xc5, 0xed, 0x83, 0x73, 0x8b, 0xdb, 0xa4, 0xe8, 0xf5, 0x8b, 0xda,
- 0xd9, 0x30, 0xdf, 0x55, 0xaa, 0xa7, 0x47, 0x97, 0xa8, 0x9e, 0x08, 0xae, 0x24, 0xf2, 0xfb, 0x55,
- 0x93, 0x13, 0x1c, 0xb4, 0xbc, 0x22, 0x4c, 0x2a, 0x21, 0x66, 0xa2, 0xd7, 0x8c, 0x8c, 0x54, 0x38,
- 0xeb, 0x74, 0xf8, 0x5a, 0x32, 0xfe, 0xb4, 0x96, 0x36, 0xd4, 0xc4, 0xe5, 0xca, 0x19, 0xc4, 0x40,
- 0x8c, 0x23, 0x14, 0x64, 0x55, 0xc0, 0x2d, 0xe2, 0xb5, 0x38, 0x38, 0x59, 0x01, 0x30, 0xc8, 0x9f,
- 0x0d, 0x42, 0x45, 0x31, 0x69, 0x14, 0xb3, 0x31, 0x37, 0x9d, 0xea, 0xc2, 0x22, 0x37, 0x70, 0x37,
- 0x2a, 0x6f, 0x5b, 0x70, 0x6e, 0x80, 0x58, 0x45, 0xee, 0x11, 0xe4, 0xeb, 0x9e, 0x4b, 0xf5, 0x4e,
- 0x4d, 0x6f, 0xbd, 0xa4, 0x1e, 0x3b, 0xe8, 0x79, 0x38, 0x1f, 0x9a, 0x2e, 0x69, 0x38, 0xe8, 0x79,
- 0x89, 0xcb, 0x73, 0xe8, 0x76, 0x0a, 0x77, 0x44, 0x71, 0x45, 0xcd, 0x13, 0xaa, 0x80, 0xaa, 0xf6,
- 0x39, 0xd7, 0x2d, 0x51, 0xfc, 0xaa, 0x4d, 0x86, 0x3e, 0x4c, 0xe1, 0x63, 0x98, 0x51, 0x30, 0x5b,
- 0xc7, 0xba, 0xdd, 0xa6, 0x3b, 0x27, 0xd4, 0xf6, 0x92, 0xc3, 0x50, 0x0c, 0x21, 0x05, 0xba, 0x08,
- 0xb0, 0x43, 0x98, 0xee, 0x4f, 0x92, 0xfc, 0x8a, 0x15, 0x7e, 0xb3, 0x88, 0x86, 0x90, 0x90, 0xf8,
- 0x25, 0xaf, 0xa2, 0x25, 0xe7, 0xa9, 0x01, 0x05, 0x59, 0xa9, 0x05, 0xbf, 0xa9, 0xc4, 0xdd, 0x1a,
- 0x97, 0xe2, 0x1a, 0xc9, 0x8a, 0xa0, 0x28, 0x91, 0xfe, 0x84, 0x84, 0x2e, 0xa1, 0xf9, 0x16, 0x96,
- 0xba, 0x83, 0xe8, 0xb1, 0xba, 0x83, 0xa0, 0x11, 0xdd, 0x21, 0x50, 0xa9, 0xdb, 0x80, 0x82, 0xcc,
- 0x44, 0x5f, 0x4f, 0xf7, 0x0d, 0x41, 0x71, 0xbd, 0x74, 0x0e, 0x05, 0x17, 0xff, 0x19, 0x14, 0x64,
- 0xb9, 0x95, 0xa4, 0x3f, 0x69, 0x15, 0xa9, 0x21, 0xac, 0x9d, 0x37, 0x84, 0x86, 0xdc, 0x22, 0xa1,
- 0xef, 0x4e, 0x17, 0x6e, 0x91, 0xa0, 0xb7, 0x7f, 0xf9, 0x88, 0xf1, 0xd1, 0xc7, 0x3d, 0x51, 0xcc,
- 0x8b, 0xa3, 0x8d, 0xc5, 0x17, 0xf3, 0xd2, 0xe6, 0x57, 0x88, 0xb8, 0x98, 0x7c, 0xb0, 0x31, 0xfc,
- 0x01, 0x4c, 0xfa, 0x77, 0xe2, 0x21, 0xb0, 0x62, 0xd2, 0xe5, 0x3a, 0xb9, 0x29, 0x60, 0x57, 0xc8,
- 0x3b, 0xb1, 0xb0, 0x8c, 0x5a, 0x47, 0x0d, 0x8f, 0xa3, 0x3d, 0x13, 0xf5, 0x57, 0xe8, 0xd3, 0xc2,
- 0xe0, 0x6b, 0x73, 0xe4, 0xdb, 0x43, 0x34, 0x07, 0xf1, 0xcd, 0xc3, 0xfd, 0xd4, 0x6b, 0xb1, 0xd9,
- 0xc4, 0xcf, 0x01, 0xfd, 0xa5, 0x97, 0x80, 0x1c, 0xff, 0x01, 0x22, 0x1a, 0x8f, 0x30, 0xb6, 0x88,
- 0x32, 0x32, 0xc8, 0xd6, 0xcd, 0x4e, 0xcf, 0xf2, 0xd7, 0x20, 0x2e, 0xf5, 0x03, 0x11, 0x6c, 0xd6,
- 0xe8, 0x8f, 0x7b, 0x94, 0x79, 0x49, 0x35, 0x45, 0xe4, 0xc2, 0x23, 0x1c, 0x23, 0x85, 0xd4, 0xe0,
- 0x48, 0x7c, 0x41, 0x6e, 0xc1, 0x54, 0xff, 0xc3, 0x01, 0x5e, 0xf3, 0x09, 0x23, 0x9f, 0x14, 0x4a,
- 0xc9, 0x26, 0x32, 0xb4, 0x69, 0xc2, 0x8c, 0xe3, 0xb6, 0x45, 0xb6, 0x69, 0x39, 0xae, 0xa1, 0x5c,
- 0x37, 0x33, 0xf2, 0xfa, 0xb9, 0x26, 0x3e, 0xa3, 0xff, 0xe8, 0xfd, 0xb6, 0xe9, 0x1d, 0xf7, 0x9a,
- 0x5c, 0x75, 0xc5, 0xf7, 0x54, 0xff, 0xce, 0xb0, 0xae, 0xbe, 0xb4, 0xb7, 0x1d, 0xd5, 0xf0, 0xc7,
- 0xe1, 0xf9, 0x03, 0x1f, 0xec, 0x59, 0xf0, 0x2a, 0xbb, 0x36, 0x5c, 0x1b, 0xa9, 0x8d, 0xd6, 0xc6,
- 0x6a, 0xe3, 0xb5, 0x89, 0xda, 0x64, 0x73, 0x5c, 0x74, 0xdc, 0xf8, 0x4f, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0xf2, 0x60, 0x62, 0xf5, 0x25, 0x21, 0x00, 0x00,
+ // 2493 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0x5b, 0x73, 0x1b, 0x49,
+ 0x15, 0xb6, 0x7c, 0xf7, 0x91, 0x64, 0x49, 0x2d, 0x5f, 0xb4, 0xb2, 0x9d, 0x38, 0xbd, 0xb9, 0xe1,
+ 0x5d, 0x4b, 0x49, 0xbc, 0x49, 0x41, 0x96, 0xad, 0x25, 0x96, 0x1d, 0x23, 0xe2, 0xd8, 0x62, 0x14,
+ 0x27, 0xc0, 0x6e, 0x4a, 0x35, 0xd2, 0xb4, 0xe5, 0xa9, 0x1d, 0xcd, 0x88, 0xe9, 0x96, 0x82, 0x2b,
+ 0x6c, 0x41, 0x85, 0x6b, 0xf1, 0xc8, 0xfe, 0x05, 0x9e, 0x28, 0xfe, 0x4a, 0x9e, 0xf8, 0x03, 0x14,
+ 0xc5, 0x03, 0x8f, 0x3c, 0x05, 0x1e, 0xa9, 0xbe, 0x8c, 0x34, 0xa3, 0x99, 0xf1, 0x65, 0xd9, 0x2a,
+ 0x9e, 0x62, 0xf5, 0x39, 0xf3, 0x7d, 0x5f, 0x9f, 0x3e, 0xdd, 0xe7, 0x4c, 0x4f, 0xa0, 0xd8, 0x77,
+ 0x2c, 0x76, 0xa2, 0x37, 0xba, 0xae, 0xc3, 0x1c, 0x5a, 0x96, 0xbf, 0x4a, 0xe2, 0x17, 0x9a, 0x96,
+ 0xbf, 0x8a, 0xab, 0x6d, 0xc7, 0x69, 0x5b, 0xa4, 0xac, 0x77, 0xcd, 0xb2, 0x6e, 0xdb, 0x0e, 0xd3,
+ 0x99, 0xe9, 0xd8, 0x54, 0x7a, 0x15, 0x57, 0x94, 0x55, 0xfc, 0x6a, 0xf6, 0x8e, 0xcb, 0xa4, 0xd3,
+ 0x65, 0xa7, 0xca, 0x58, 0x08, 0xc2, 0x77, 0x08, 0x53, 0xe0, 0xc5, 0x11, 0xe2, 0x96, 0xd3, 0xe9,
+ 0x38, 0x76, 0xb4, 0xed, 0x84, 0xe8, 0x16, 0x3b, 0x51, 0x36, 0x1c, 0xb4, 0x59, 0x4e, 0xdb, 0x6c,
+ 0xe9, 0x56, 0xc3, 0x20, 0x7d, 0xb3, 0x45, 0xa2, 0x9f, 0x0f, 0xd8, 0x56, 0x82, 0x36, 0xdd, 0xd0,
+ 0xbb, 0x8c, 0xb8, 0xca, 0x78, 0x35, 0x68, 0x74, 0xba, 0xc4, 0x3e, 0xb6, 0x9c, 0x57, 0x8d, 0xbb,
+ 0x5b, 0x31, 0x0e, 0x9d, 0x96, 0xd9, 0xe8, 0x98, 0xcd, 0x86, 0xd1, 0x54, 0x0e, 0xd7, 0x22, 0x1c,
+ 0x74, 0x4b, 0x77, 0x3b, 0x43, 0x97, 0xf5, 0xa0, 0xcb, 0xa9, 0x6e, 0xb7, 0x1b, 0x4e, 0xd7, 0x17,
+ 0x52, 0xfc, 0xa7, 0x04, 0x24, 0x77, 0x84, 0xe8, 0x3d, 0xd7, 0xe9, 0x75, 0xd1, 0x22, 0x8c, 0x9b,
+ 0x46, 0x21, 0xb1, 0x9e, 0xb8, 0x3d, 0xb7, 0x3d, 0xf5, 0xcf, 0x77, 0x6f, 0xd7, 0x12, 0xda, 0xb8,
+ 0x69, 0xa0, 0x2a, 0x64, 0x82, 0xd3, 0xa7, 0x85, 0xf1, 0xf5, 0x89, 0xdb, 0xc9, 0x7b, 0x8b, 0x25,
+ 0xb5, 0x8e, 0xfb, 0xd2, 0x2c, 0xb1, 0xb6, 0xe7, 0xfe, 0xfe, 0xee, 0xed, 0xda, 0x24, 0xc7, 0xd2,
+ 0xe6, 0x2d, 0xbf, 0x85, 0xa2, 0x2d, 0x98, 0xf1, 0x20, 0x26, 0x04, 0xc4, 0xbc, 0x07, 0x11, 0x7e,
+ 0xd6, 0xf3, 0xc4, 0xdf, 0x81, 0x94, 0x4f, 0x25, 0x45, 0xdf, 0x82, 0x29, 0x93, 0x91, 0x0e, 0x2d,
+ 0x24, 0x04, 0x44, 0x3e, 0x08, 0x21, 0x9c, 0x34, 0xe9, 0x81, 0x7f, 0x01, 0xe8, 0x11, 0x8f, 0xca,
+ 0x63, 0xd3, 0x62, 0xc4, 0xd5, 0x7a, 0x16, 0x79, 0x42, 0x4e, 0x71, 0x33, 0x6a, 0x14, 0x4d, 0x73,
+ 0xd6, 0xec, 0x18, 0x9a, 0x85, 0x49, 0x76, 0xda, 0x25, 0xd9, 0x04, 0x4a, 0xc1, 0x2c, 0x25, 0x7d,
+ 0xe2, 0x9a, 0xec, 0x34, 0x3b, 0x8e, 0x32, 0x90, 0x74, 0x09, 0x75, 0x7a, 0x6e, 0x8b, 0x34, 0x4c,
+ 0x23, 0x3b, 0xc1, 0xcd, 0x2d, 0x9d, 0x91, 0xb6, 0xe3, 0x9e, 0x66, 0x27, 0x51, 0x1a, 0xe6, 0xa4,
+ 0x60, 0x6e, 0x9c, 0x7a, 0x38, 0xf5, 0xaf, 0x77, 0x6f, 0xd7, 0xc6, 0xf0, 0x09, 0x64, 0x46, 0xa8,
+ 0xd0, 0xa7, 0x30, 0xf1, 0x05, 0x39, 0x15, 0x61, 0x9e, 0xbf, 0xb7, 0xe9, 0x89, 0x0f, 0x0b, 0x8a,
+ 0x18, 0xd2, 0xf8, 0x93, 0x68, 0x01, 0xa6, 0xfa, 0xba, 0xd5, 0x23, 0x85, 0x71, 0xbe, 0x52, 0x9a,
+ 0xfc, 0x81, 0xeb, 0x90, 0xf4, 0x3d, 0x10, 0xb7, 0x96, 0x9b, 0x30, 0xe5, 0xf6, 0xac, 0xc1, 0x0a,
+ 0x2e, 0xc7, 0xd0, 0x6b, 0xd2, 0x0b, 0x7f, 0x02, 0x29, 0x9f, 0x85, 0xa2, 0x4d, 0x98, 0x39, 0x96,
+ 0x7f, 0x8e, 0x06, 0xdf, 0x0f, 0xe0, 0xf9, 0x60, 0x17, 0x52, 0x15, 0xc7, 0x25, 0x55, 0x9b, 0x32,
+ 0xdd, 0x6e, 0x11, 0x74, 0x13, 0x92, 0xa6, 0xfa, 0xbb, 0x31, 0xaa, 0x0e, 0x3c, 0x4b, 0xd5, 0x40,
+ 0x5b, 0x30, 0x2d, 0x37, 0xa3, 0x98, 0x62, 0xf2, 0xde, 0x82, 0xc7, 0xf2, 0x7d, 0x31, 0x5a, 0x67,
+ 0x3a, 0xeb, 0xd1, 0xed, 0x29, 0x9e, 0x2b, 0x63, 0x9a, 0x72, 0x7d, 0x38, 0xf5, 0x1f, 0x8e, 0x83,
+ 0xb7, 0x21, 0xed, 0xe7, 0xa4, 0x68, 0x23, 0x98, 0x2e, 0x03, 0x2c, 0xbf, 0x97, 0xca, 0x17, 0x0f,
+ 0xe3, 0x6f, 0x93, 0x30, 0xfd, 0x5c, 0x78, 0xa1, 0xab, 0x30, 0xd3, 0x27, 0x2e, 0x35, 0x1d, 0x3b,
+ 0x28, 0xd7, 0x1b, 0x45, 0x0f, 0x60, 0x56, 0x6d, 0x6e, 0x2f, 0xa8, 0x99, 0x41, 0x4c, 0xe4, 0xb8,
+ 0x3f, 0xa9, 0x07, 0xbe, 0x51, 0xbb, 0x6a, 0xe2, 0x7f, 0xdf, 0x55, 0x93, 0x17, 0xdd, 0x55, 0xe8,
+ 0x7b, 0x90, 0x52, 0xf9, 0xca, 0xb3, 0x9d, 0x16, 0xa6, 0xc4, 0x93, 0x28, 0xf8, 0xe4, 0xb3, 0xd3,
+ 0x6e, 0xe0, 0xe9, 0xa4, 0x31, 0x18, 0xa6, 0xa8, 0x02, 0x69, 0x85, 0xd0, 0x16, 0x1b, 0xb3, 0x30,
+ 0x1d, 0xbb, 0x1f, 0xfd, 0x18, 0x8a, 0x56, 0x6d, 0xe6, 0x0a, 0xa4, 0xe5, 0xb9, 0xe5, 0xe5, 0xd5,
+ 0x4c, 0x6c, 0x5e, 0x05, 0x40, 0x74, 0x7f, 0x5a, 0xfe, 0x10, 0x72, 0xc3, 0x23, 0x52, 0x67, 0x7a,
+ 0x53, 0xa7, 0xa4, 0xb0, 0xaa, 0x80, 0xb8, 0xa5, 0xf4, 0xd4, 0x6c, 0x4a, 0x39, 0x3b, 0x3a, 0xd3,
+ 0xb7, 0xb3, 0x1c, 0x28, 0xe9, 0xdb, 0xb0, 0x5a, 0x86, 0x7b, 0x71, 0x27, 0xf5, 0x34, 0x7a, 0x01,
+ 0x79, 0xff, 0xa1, 0xea, 0x81, 0xae, 0xa9, 0x25, 0x12, 0xa0, 0x42, 0xdb, 0x99, 0xb0, 0x42, 0x96,
+ 0x74, 0x53, 0x08, 0x5e, 0x8a, 0xfd, 0x25, 0x01, 0xd9, 0x3a, 0xb1, 0x8e, 0x9f, 0x11, 0xca, 0x34,
+ 0x42, 0xbb, 0x8e, 0x4d, 0xf9, 0xd1, 0x30, 0xed, 0x12, 0xda, 0xb3, 0x98, 0x3a, 0x1d, 0x6e, 0x79,
+ 0x51, 0x18, 0xf5, 0xf4, 0x0f, 0xf4, 0x2c, 0xa6, 0xa9, 0xc7, 0x70, 0x0d, 0xe6, 0x83, 0x16, 0x94,
+ 0x84, 0x99, 0xfa, 0x51, 0xa5, 0xb2, 0x5b, 0xaf, 0x67, 0xc7, 0xf8, 0x8f, 0xc7, 0x8f, 0xaa, 0xfb,
+ 0x47, 0xda, 0x6e, 0x36, 0x81, 0x72, 0x90, 0x3e, 0x38, 0x7c, 0xd6, 0xa8, 0x1f, 0xd5, 0x6a, 0x87,
+ 0xda, 0xb3, 0xdd, 0x9d, 0xec, 0x38, 0x1f, 0x3a, 0x3a, 0x78, 0x72, 0x70, 0xf8, 0xe2, 0xa0, 0xb1,
+ 0xab, 0x69, 0x87, 0x5a, 0x76, 0xc2, 0x3b, 0xc7, 0x0e, 0x21, 0x77, 0x78, 0xfc, 0xa8, 0x4d, 0x6c,
+ 0x56, 0xef, 0x35, 0x69, 0xcb, 0x35, 0x9b, 0xc4, 0x45, 0x6b, 0x00, 0xce, 0xb1, 0xce, 0x07, 0x07,
+ 0xbb, 0x59, 0x9b, 0x53, 0x23, 0x55, 0x03, 0xad, 0xc0, 0x9c, 0x2a, 0x41, 0xa6, 0xa1, 0xce, 0xaa,
+ 0x59, 0x39, 0x50, 0x35, 0xf0, 0xc7, 0x00, 0x4f, 0x49, 0xa7, 0x49, 0x5c, 0x7a, 0x62, 0x76, 0x39,
+ 0x92, 0xc8, 0xa1, 0x86, 0xad, 0x77, 0x88, 0x87, 0x24, 0x46, 0x0e, 0xf4, 0x0e, 0x41, 0xf3, 0xe2,
+ 0x30, 0x93, 0x10, 0xe3, 0xa6, 0x81, 0x77, 0x21, 0xf5, 0xd8, 0x72, 0x5e, 0x3d, 0x25, 0x4c, 0xe7,
+ 0x2b, 0x83, 0xee, 0xc3, 0x74, 0x87, 0xf8, 0x4e, 0xa5, 0xb5, 0x92, 0xbf, 0xa4, 0x3a, 0xc7, 0xdd,
+ 0x86, 0x30, 0x37, 0x5a, 0x8e, 0x7d, 0x6c, 0xb6, 0x35, 0xe5, 0x7c, 0xef, 0xcd, 0x1d, 0x48, 0xcb,
+ 0x6d, 0x5e, 0x27, 0x2e, 0x5f, 0x32, 0xa4, 0xc1, 0xfc, 0x51, 0xd7, 0xd0, 0x19, 0xd9, 0x77, 0xda,
+ 0xfb, 0xa4, 0x4f, 0x2c, 0x94, 0x29, 0xa9, 0x96, 0x61, 0xdf, 0x69, 0xb7, 0x4d, 0xbb, 0x5d, 0x5c,
+ 0x2a, 0xc9, 0x46, 0xa4, 0xe4, 0x35, 0x22, 0xa5, 0x5d, 0xde, 0x88, 0xe0, 0xe5, 0x37, 0x7f, 0xfd,
+ 0xc7, 0x57, 0xe3, 0x39, 0x9c, 0x12, 0xfd, 0x4b, 0xff, 0x2e, 0x6f, 0x19, 0xe8, 0xc3, 0xc4, 0x06,
+ 0xaa, 0x41, 0x6a, 0x8f, 0x30, 0x0f, 0x90, 0xa2, 0xc2, 0x08, 0x62, 0xc5, 0xe9, 0x74, 0x1d, 0x9b,
+ 0xd8, 0xac, 0x98, 0x1d, 0xb1, 0x50, 0xbc, 0x20, 0x40, 0xe7, 0x51, 0x00, 0x14, 0xbd, 0x80, 0xf4,
+ 0x1e, 0x61, 0xbe, 0xf0, 0xc5, 0x68, 0x2a, 0x0e, 0x76, 0xf3, 0xd0, 0x17, 0x17, 0x05, 0xe4, 0x02,
+ 0x42, 0x1e, 0x64, 0x67, 0x88, 0xf3, 0x12, 0xb2, 0x72, 0xfa, 0x3e, 0xec, 0x08, 0x8c, 0xd8, 0x18,
+ 0xac, 0x09, 0xec, 0x65, 0x1c, 0x81, 0xcd, 0x23, 0xb1, 0x03, 0x73, 0x7b, 0x84, 0xa9, 0x83, 0x35,
+ 0x4e, 0xf3, 0xe0, 0xec, 0x92, 0x7e, 0x38, 0x23, 0x30, 0xe7, 0xd0, 0x8c, 0xc2, 0x44, 0x3d, 0xc8,
+ 0xed, 0x9b, 0x94, 0x05, 0x0f, 0xf9, 0x38, 0xb4, 0xc5, 0xa8, 0xd3, 0x9e, 0xe2, 0xbb, 0x7f, 0xf8,
+ 0xf7, 0xdb, 0xb5, 0x19, 0x55, 0x18, 0xc4, 0xdf, 0x48, 0xfe, 0x2d, 0xc8, 0xf2, 0x28, 0xe7, 0x4d,
+ 0xc0, 0x1c, 0x30, 0xd4, 0x21, 0xb3, 0x47, 0x02, 0xac, 0x08, 0xbc, 0xf5, 0xaa, 0xee, 0x14, 0x23,
+ 0xcb, 0x0a, 0xbe, 0x22, 0xf0, 0x0a, 0x68, 0x29, 0x84, 0x57, 0x7e, 0x6d, 0x1a, 0x5f, 0x22, 0x1d,
+ 0x52, 0x7c, 0x2e, 0x8f, 0xbc, 0xa2, 0x10, 0x37, 0x8d, 0xec, 0x48, 0x49, 0xa1, 0xf8, 0x16, 0x57,
+ 0x0d, 0xc3, 0xda, 0x23, 0x88, 0x10, 0xca, 0x7a, 0x44, 0x83, 0x3a, 0xf3, 0x1a, 0x10, 0xa7, 0xd8,
+ 0x0f, 0x96, 0x8c, 0x38, 0xa2, 0xa5, 0xc8, 0xe2, 0x43, 0xf1, 0x7d, 0x4e, 0x97, 0x0b, 0x95, 0x2c,
+ 0xc1, 0xfa, 0x1e, 0x5a, 0xf6, 0xa5, 0xa7, 0xdf, 0x8c, 0x3e, 0x83, 0xac, 0xcc, 0xfd, 0x21, 0x56,
+ 0x20, 0x6a, 0xd1, 0xb5, 0x0e, 0x5f, 0x17, 0xb8, 0x57, 0xd0, 0x6a, 0x0c, 0xae, 0x0c, 0x9e, 0x0b,
+ 0x4b, 0xa1, 0x99, 0xd5, 0x1c, 0x97, 0xd1, 0xe8, 0x85, 0x51, 0x7e, 0xc2, 0x03, 0x3f, 0x50, 0x09,
+ 0xd0, 0xe5, 0xbf, 0x04, 0xdb, 0x75, 0x84, 0xcf, 0x62, 0x2b, 0x0b, 0x4f, 0xf4, 0xab, 0x04, 0x2c,
+ 0x8c, 0xce, 0x88, 0x23, 0xa2, 0xc5, 0x08, 0x9a, 0xaa, 0x51, 0xcc, 0x47, 0x0c, 0xe3, 0x4f, 0x39,
+ 0xf9, 0x34, 0x4c, 0x72, 0x48, 0xc1, 0x5d, 0x42, 0x1f, 0x9e, 0xcf, 0x5d, 0x7e, 0xcd, 0xff, 0x69,
+ 0xf0, 0x99, 0xff, 0x26, 0x01, 0xcb, 0xbb, 0xb6, 0xde, 0xb4, 0xc8, 0x85, 0x85, 0xc4, 0x6d, 0xd9,
+ 0x8f, 0x85, 0x80, 0xfb, 0x78, 0xeb, 0x32, 0x02, 0xca, 0x44, 0x90, 0xa3, 0xdf, 0x25, 0xa0, 0xb0,
+ 0x63, 0xd2, 0x6f, 0x44, 0xc8, 0x77, 0x85, 0x90, 0x07, 0xf8, 0xa3, 0x4b, 0x09, 0x31, 0x24, 0x3b,
+ 0xfa, 0x79, 0x44, 0x2e, 0xf0, 0x12, 0x11, 0xcc, 0x05, 0x14, 0xa8, 0x0b, 0xc2, 0x8e, 0xb7, 0x55,
+ 0x26, 0xf0, 0xd1, 0xf0, 0x51, 0x70, 0x5e, 0x56, 0x88, 0xa7, 0x78, 0x56, 0xac, 0x0e, 0xea, 0x46,
+ 0x50, 0xc0, 0x33, 0x21, 0x6f, 0x35, 0x44, 0x2c, 0xc6, 0xe5, 0x33, 0xb1, 0x21, 0xd9, 0x14, 0x12,
+ 0x6e, 0xe1, 0x0b, 0x48, 0xe0, 0xc7, 0xeb, 0xaf, 0x13, 0xb0, 0x16, 0xa1, 0xe2, 0x29, 0x2f, 0x76,
+ 0x52, 0xc6, 0x4a, 0x40, 0x86, 0x30, 0x3c, 0x75, 0x8c, 0x73, 0x54, 0x94, 0x84, 0x8a, 0xdb, 0xf8,
+ 0xfd, 0x33, 0x55, 0xc8, 0x92, 0xca, 0x65, 0xfc, 0x32, 0x01, 0xcb, 0xa1, 0xb5, 0x10, 0x5c, 0xc1,
+ 0xc5, 0xc8, 0x87, 0xc5, 0x50, 0xbc, 0xc3, 0x57, 0x60, 0xd6, 0x2b, 0xe7, 0xa1, 0xe5, 0xb8, 0x81,
+ 0x2e, 0xa2, 0x02, 0xfd, 0x31, 0x01, 0x2b, 0x91, 0xe9, 0xa0, 0x9a, 0x4e, 0xbf, 0x8c, 0xe5, 0xd0,
+ 0xd2, 0x48, 0x27, 0x7c, 0xc0, 0xd9, 0xd3, 0x90, 0x14, 0x26, 0xd9, 0xe1, 0x86, 0xf4, 0x6c, 0xa0,
+ 0xdb, 0xe7, 0xae, 0x8d, 0x7a, 0x16, 0x7d, 0x95, 0x80, 0x6b, 0x31, 0x49, 0x22, 0x18, 0xe5, 0x12,
+ 0x5d, 0x8b, 0x96, 0x73, 0x91, 0x74, 0xd9, 0x12, 0x92, 0x36, 0xf1, 0x85, 0x25, 0xf1, 0xd5, 0x7a,
+ 0x09, 0x49, 0x1e, 0xa9, 0xf3, 0xea, 0x42, 0x26, 0xd8, 0xd4, 0x53, 0x7c, 0x83, 0xc7, 0x62, 0x6e,
+ 0xf0, 0xe2, 0x21, 0x9b, 0x1f, 0x94, 0xf1, 0xa8, 0xbd, 0x02, 0x60, 0x40, 0x7a, 0x08, 0x5f, 0x35,
+ 0xe2, 0x09, 0x92, 0xc3, 0x25, 0xa1, 0xb8, 0xc4, 0xc1, 0x45, 0xa3, 0x7f, 0x56, 0x6d, 0x96, 0x1c,
+ 0xa6, 0x41, 0xd1, 0x11, 0x64, 0x35, 0xd2, 0x72, 0xec, 0x96, 0x69, 0x11, 0x6f, 0x26, 0x7e, 0xc0,
+ 0xd8, 0x90, 0xad, 0x0a, 0xcc, 0x25, 0x1c, 0xc6, 0xe4, 0xb1, 0xd9, 0x15, 0xfd, 0x4a, 0x44, 0xd9,
+ 0x1a, 0x79, 0xbf, 0xf2, 0x60, 0xd0, 0xc2, 0xc8, 0xf4, 0x65, 0x9d, 0xfa, 0x01, 0xa4, 0x2a, 0x2e,
+ 0xd1, 0x99, 0x92, 0x86, 0x46, 0x9e, 0x0e, 0xa1, 0xa9, 0x0e, 0x0d, 0x8f, 0x06, 0x93, 0x4b, 0x7a,
+ 0x01, 0x29, 0x79, 0xf0, 0x47, 0xa8, 0x8a, 0x9b, 0xe4, 0xfb, 0x02, 0x6f, 0x0d, 0xaf, 0x44, 0xa9,
+ 0xf3, 0x8e, 0xf2, 0x1f, 0x43, 0x5a, 0x9d, 0xe4, 0x97, 0x40, 0x56, 0x75, 0x1a, 0xaf, 0x46, 0x22,
+ 0x7b, 0x67, 0xf3, 0x0b, 0x48, 0x69, 0xa4, 0xe9, 0x38, 0xec, 0x1b, 0xd3, 0xec, 0x0a, 0x38, 0x0e,
+ 0xbc, 0x43, 0x2c, 0xc2, 0xbe, 0x46, 0x30, 0x36, 0xa2, 0x81, 0x0d, 0x01, 0x87, 0x7a, 0x90, 0xde,
+ 0x71, 0x5e, 0xd9, 0x96, 0xa3, 0x1b, 0xd5, 0x8e, 0xde, 0x26, 0xc3, 0x5a, 0x26, 0x7e, 0x7a, 0xb6,
+ 0xe2, 0xa2, 0x47, 0x78, 0xd8, 0x25, 0xae, 0xb8, 0xad, 0xe4, 0x2f, 0x68, 0xf8, 0x81, 0xe0, 0xb8,
+ 0x83, 0x3f, 0x88, 0xe4, 0x30, 0x39, 0x44, 0xc3, 0x50, 0x18, 0xb4, 0xfc, 0x9a, 0xbf, 0xf3, 0x7c,
+ 0xc9, 0x17, 0xf7, 0x4d, 0x02, 0x96, 0xf6, 0x08, 0x0b, 0x70, 0xc8, 0xbb, 0x8e, 0x78, 0x01, 0x51,
+ 0xc3, 0xf8, 0xa1, 0x10, 0xf0, 0x11, 0xba, 0x77, 0x09, 0x01, 0x65, 0x2a, 0x99, 0x7a, 0xa2, 0x65,
+ 0x0b, 0xe0, 0x5d, 0x92, 0x5d, 0x9d, 0x43, 0xe8, 0x32, 0xd3, 0x47, 0xc7, 0xb2, 0x4d, 0x0d, 0x20,
+ 0xd1, 0x91, 0x15, 0x8d, 0x62, 0xa3, 0xf8, 0x43, 0x41, 0x77, 0x13, 0x5d, 0xbf, 0x08, 0x1d, 0xfa,
+ 0x19, 0xe4, 0x2b, 0xbc, 0x01, 0xb7, 0x2e, 0x38, 0xc3, 0xc8, 0x05, 0x56, 0x33, 0xdc, 0xb8, 0xd4,
+ 0x0c, 0x7f, 0x9f, 0x80, 0xfc, 0xa3, 0x16, 0x33, 0xfb, 0x3a, 0x23, 0x82, 0x45, 0x1e, 0xe7, 0x97,
+ 0xa4, 0xae, 0x08, 0xea, 0x4f, 0xf0, 0xb7, 0x2f, 0xb3, 0xb4, 0x72, 0xb8, 0x27, 0xf8, 0x78, 0xa2,
+ 0xfd, 0x36, 0x01, 0x39, 0x8d, 0xf4, 0x89, 0xcb, 0xfe, 0x2f, 0x42, 0x5c, 0x41, 0xcd, 0x85, 0x7c,
+ 0x0e, 0x99, 0x61, 0x79, 0x08, 0xf7, 0xee, 0x69, 0x4f, 0x91, 0x6c, 0xda, 0x4b, 0xa1, 0xa6, 0x7d,
+ 0x15, 0x15, 0x23, 0xe9, 0x65, 0xb3, 0xfe, 0x12, 0xf2, 0x3e, 0xf4, 0x4e, 0x45, 0xbc, 0xfd, 0x07,
+ 0x19, 0x72, 0x03, 0x06, 0xcf, 0x8c, 0x6f, 0x09, 0xe4, 0x6b, 0xe8, 0x6a, 0x34, 0x72, 0x47, 0xdd,
+ 0x22, 0x50, 0x64, 0xc3, 0xa2, 0x8c, 0xdc, 0x28, 0x41, 0x18, 0x34, 0xf6, 0x38, 0xda, 0x90, 0x5d,
+ 0x26, 0x3e, 0x8f, 0x8c, 0x07, 0xab, 0xe3, 0x0f, 0xd6, 0xc5, 0x9a, 0xdb, 0x87, 0x67, 0x36, 0xb7,
+ 0x71, 0xd1, 0x1b, 0x34, 0xb5, 0x0b, 0x41, 0xbe, 0xcb, 0x74, 0x4f, 0x8f, 0x2f, 0xd0, 0x3d, 0x61,
+ 0xb4, 0x1e, 0xcb, 0xef, 0x75, 0x4d, 0x8e, 0x7f, 0xd2, 0xf2, 0xde, 0x31, 0xae, 0x85, 0xc8, 0x87,
+ 0xef, 0x2e, 0x29, 0x2e, 0x73, 0xd6, 0xf9, 0xe0, 0x5d, 0x67, 0x74, 0xb5, 0x96, 0x36, 0xa4, 0x89,
+ 0xcb, 0x95, 0x21, 0xc4, 0x48, 0x8c, 0x43, 0x14, 0xf8, 0x9a, 0x80, 0x5b, 0x41, 0xef, 0x45, 0xc1,
+ 0xc9, 0x0e, 0x80, 0x42, 0x76, 0x38, 0x09, 0x15, 0xc5, 0xb8, 0x59, 0x2c, 0x44, 0x5c, 0x9f, 0xaa,
+ 0x0b, 0x8b, 0xcc, 0xc8, 0x85, 0xab, 0xbc, 0x6d, 0x41, 0x8b, 0x23, 0xc4, 0x2a, 0x72, 0x8f, 0x21,
+ 0x5b, 0x67, 0x2e, 0xd1, 0x3b, 0x35, 0xbd, 0xf5, 0x05, 0x61, 0xf4, 0xb0, 0xc7, 0xd0, 0x52, 0x60,
+ 0xb9, 0xa4, 0xe1, 0xb0, 0xc7, 0x62, 0xd3, 0x73, 0xec, 0x76, 0x02, 0xed, 0x8a, 0xe6, 0x8a, 0x98,
+ 0x7d, 0xa2, 0x80, 0xaa, 0xf6, 0x19, 0xd7, 0x2d, 0x61, 0xfc, 0xaa, 0x8d, 0xc7, 0xee, 0x24, 0xd0,
+ 0x13, 0xc8, 0x2b, 0x98, 0xca, 0x89, 0x6e, 0xb7, 0xc9, 0x6e, 0x9f, 0xd8, 0x2c, 0x3e, 0x0c, 0x85,
+ 0x00, 0x92, 0xef, 0x11, 0x01, 0x76, 0x04, 0xf3, 0x83, 0x45, 0x92, 0xdf, 0xae, 0x82, 0x6f, 0x16,
+ 0xe1, 0x10, 0x62, 0x1c, 0x9d, 0xf2, 0x2a, 0x5a, 0x72, 0x9d, 0x1a, 0x90, 0x93, 0x9d, 0x9a, 0xff,
+ 0x4b, 0x4a, 0xd4, 0x55, 0x74, 0x31, 0x6a, 0x10, 0xaf, 0x0b, 0x8a, 0x22, 0x1e, 0x2c, 0x48, 0xe0,
+ 0x66, 0x9b, 0x6f, 0x61, 0xa9, 0xdb, 0x8f, 0x1e, 0xa9, 0xdb, 0x0f, 0x1a, 0xd2, 0x1d, 0x00, 0x95,
+ 0xba, 0x0d, 0xc8, 0xc9, 0x93, 0xe8, 0xeb, 0xe9, 0xbe, 0x21, 0x28, 0xae, 0x16, 0xcf, 0xa0, 0xe0,
+ 0xe2, 0x3f, 0x83, 0x9c, 0x6c, 0xb7, 0xe2, 0xf4, 0xc7, 0x65, 0x91, 0x9a, 0xc2, 0xc6, 0x59, 0x53,
+ 0x68, 0xc8, 0x2d, 0x12, 0xf8, 0xda, 0x74, 0xee, 0x16, 0xf1, 0x7b, 0x7b, 0x97, 0x8f, 0x28, 0x3a,
+ 0xfa, 0x68, 0x5f, 0x34, 0xf3, 0xa2, 0xb4, 0xd1, 0xe8, 0x66, 0x5e, 0xda, 0xbc, 0x0e, 0x11, 0xad,
+ 0xc4, 0x17, 0x36, 0x8a, 0x7e, 0x04, 0xb3, 0xde, 0x45, 0x7b, 0x00, 0xac, 0x10, 0x77, 0x63, 0x8f,
+ 0x6f, 0x0a, 0xd8, 0x75, 0x7c, 0x25, 0x12, 0x96, 0x12, 0xeb, 0xb8, 0xc1, 0x38, 0xda, 0x73, 0xd1,
+ 0x7f, 0x05, 0xbe, 0x57, 0x8c, 0xbe, 0x36, 0x87, 0x3e, 0x68, 0x84, 0xcf, 0x20, 0xbe, 0x79, 0xb8,
+ 0x9f, 0x7a, 0x2d, 0x36, 0x9b, 0xe8, 0x73, 0x40, 0x5e, 0xea, 0xc5, 0x20, 0x47, 0x7f, 0xd5, 0x08,
+ 0xc7, 0x23, 0x88, 0x2d, 0xa2, 0x8c, 0x28, 0xa4, 0xeb, 0x66, 0xa7, 0x67, 0x79, 0x39, 0x88, 0x56,
+ 0x07, 0x81, 0xf0, 0x0f, 0x6b, 0xe4, 0xa7, 0x3d, 0x42, 0x59, 0x5c, 0x4f, 0x11, 0xba, 0xf0, 0x08,
+ 0xc6, 0x48, 0x21, 0x35, 0x38, 0x12, 0x4f, 0xc8, 0x0a, 0xcc, 0x0d, 0xbe, 0x46, 0xa0, 0xf7, 0x3c,
+ 0xc2, 0xd0, 0x77, 0x8a, 0x62, 0xbc, 0x09, 0x8f, 0x6d, 0x9b, 0x90, 0x77, 0xdc, 0xb6, 0x38, 0x6d,
+ 0x5a, 0x8e, 0x6b, 0x28, 0xd7, 0xed, 0x94, 0xbc, 0x7e, 0xae, 0x89, 0x8f, 0xe7, 0x3f, 0xf9, 0xa0,
+ 0x6d, 0xb2, 0x93, 0x5e, 0x93, 0xab, 0x2e, 0x7b, 0x9e, 0xea, 0x3f, 0x31, 0x6c, 0xaa, 0xef, 0xeb,
+ 0x6d, 0x47, 0x0d, 0xfc, 0x79, 0x7c, 0xe9, 0xd0, 0x03, 0x7b, 0xee, 0xbf, 0xca, 0xae, 0x8d, 0xd7,
+ 0x26, 0x6a, 0x93, 0xb5, 0xa9, 0xda, 0x74, 0x6d, 0xa6, 0x36, 0xdb, 0x9c, 0x16, 0x0f, 0x6e, 0xfd,
+ 0x37, 0x00, 0x00, 0xff, 0xff, 0x23, 0x82, 0x59, 0x22, 0x1b, 0x21, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -2203,7 +2163,8 @@
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VolthaServiceClient interface {
// Get more information on a given physical device
- UpdateLogLevel(ctx context.Context, in *Logging, opts ...grpc.CallOption) (*empty.Empty, error)
+ UpdateLogLevel(ctx context.Context, in *common.Logging, opts ...grpc.CallOption) (*empty.Empty, error)
+ GetLogLevels(ctx context.Context, in *common.LoggingComponent, opts ...grpc.CallOption) (*common.Loggings, error)
// Get the membership group of a Voltha Core
GetMembership(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Membership, error)
// Set the membership group of a Voltha Core
@@ -2336,7 +2297,7 @@
return &volthaServiceClient{cc}
}
-func (c *volthaServiceClient) UpdateLogLevel(ctx context.Context, in *Logging, opts ...grpc.CallOption) (*empty.Empty, error) {
+func (c *volthaServiceClient) UpdateLogLevel(ctx context.Context, in *common.Logging, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogLevel", in, out, opts...)
if err != nil {
@@ -2345,6 +2306,15 @@
return out, nil
}
+func (c *volthaServiceClient) GetLogLevels(ctx context.Context, in *common.LoggingComponent, opts ...grpc.CallOption) (*common.Loggings, error) {
+ out := new(common.Loggings)
+ err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetLogLevels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
func (c *volthaServiceClient) GetMembership(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Membership, error) {
out := new(Membership)
err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetMembership", in, out, opts...)
@@ -2932,7 +2902,8 @@
// VolthaServiceServer is the server API for VolthaService service.
type VolthaServiceServer interface {
// Get more information on a given physical device
- UpdateLogLevel(context.Context, *Logging) (*empty.Empty, error)
+ UpdateLogLevel(context.Context, *common.Logging) (*empty.Empty, error)
+ GetLogLevels(context.Context, *common.LoggingComponent) (*common.Loggings, error)
// Get the membership group of a Voltha Core
GetMembership(context.Context, *empty.Empty) (*Membership, error)
// Set the membership group of a Voltha Core
@@ -3062,7 +3033,7 @@
}
func _VolthaService_UpdateLogLevel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Logging)
+ in := new(common.Logging)
if err := dec(in); err != nil {
return nil, err
}
@@ -3074,7 +3045,25 @@
FullMethod: "/voltha.VolthaService/UpdateLogLevel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(VolthaServiceServer).UpdateLogLevel(ctx, req.(*Logging))
+ return srv.(VolthaServiceServer).UpdateLogLevel(ctx, req.(*common.Logging))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetLogLevels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(common.LoggingComponent)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(VolthaServiceServer).GetLogLevels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/voltha.VolthaService/GetLogLevels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(VolthaServiceServer).GetLogLevels(ctx, req.(*common.LoggingComponent))
}
return interceptor(ctx, in, info, handler)
}
@@ -4128,6 +4117,10 @@
Handler: _VolthaService_UpdateLogLevel_Handler,
},
{
+ MethodName: "GetLogLevels",
+ Handler: _VolthaService_GetLogLevels_Handler,
+ },
+ {
MethodName: "GetMembership",
Handler: _VolthaService_GetMembership_Handler,
},
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/.gitignore b/vendor/gopkg.in/Shopify/sarama.v1/.gitignore
deleted file mode 100644
index 6e362e4..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/.gitignore
+++ /dev/null
@@ -1,27 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-*.test
-
-# Folders
-_obj
-_test
-.vagrant
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-coverage.txt
-profile.out
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/.travis.yml b/vendor/gopkg.in/Shopify/sarama.v1/.travis.yml
deleted file mode 100644
index 4331fa1..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/.travis.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-dist: xenial
-language: go
-go:
-- 1.11.x
-- 1.12.x
-
-env:
- global:
- - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- - TOXIPROXY_ADDR=http://localhost:8474
- - KAFKA_INSTALL_ROOT=/home/travis/kafka
- - KAFKA_HOSTNAME=localhost
- - DEBUG=true
- matrix:
- - KAFKA_VERSION=2.1.1 KAFKA_SCALA_VERSION=2.12
- - KAFKA_VERSION=2.2.1 KAFKA_SCALA_VERSION=2.12
- - KAFKA_VERSION=2.3.0 KAFKA_SCALA_VERSION=2.12
-
-before_install:
-- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
-- vagrant/install_cluster.sh
-- vagrant/boot_cluster.sh
-- vagrant/create_topics.sh
-- vagrant/run_java_producer.sh
-
-install: make install_dependencies
-
-script:
-- make test
-- make vet
-- make errcheck
-- if [[ "$TRAVIS_GO_VERSION" == 1.12* ]]; then make fmt; fi
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
-
-after_script: vagrant/halt_cluster.sh
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md b/vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md
deleted file mode 100644
index 4f89e21..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/CHANGELOG.md
+++ /dev/null
@@ -1,779 +0,0 @@
-# Changelog
-
-#### Version 1.23.0 (2019-07-02)
-
-New Features:
-- Add support for Kafka 2.3.0
- ([1418](https://github.com/Shopify/sarama/pull/1418)).
-- Add support for ListConsumerGroupOffsets v2
- ([1374](https://github.com/Shopify/sarama/pull/1374)).
-- Add support for DeleteConsumerGroup
- ([1417](https://github.com/Shopify/sarama/pull/1417)).
-- Add support for SASLVersion configuration
- ([1410](https://github.com/Shopify/sarama/pull/1410)).
-- Add kerberos support
- ([1366](https://github.com/Shopify/sarama/pull/1366)).
-
-Improvements:
-- Improve sasl_scram_client example
- ([1406](https://github.com/Shopify/sarama/pull/1406)).
-- Fix shutdown and race-condition in consumer-group example
- ([1404](https://github.com/Shopify/sarama/pull/1404)).
-- Add support for error codes 77—81
- ([1397](https://github.com/Shopify/sarama/pull/1397)).
-- Pool internal objects allocated per message
- ([1385](https://github.com/Shopify/sarama/pull/1385)).
-- Reduce packet decoder allocations
- ([1373](https://github.com/Shopify/sarama/pull/1373)).
-- Support timeout when fetching metadata
- ([1359](https://github.com/Shopify/sarama/pull/1359)).
-
-Bug Fixes:
-- Fix fetch size integer overflow
- ([1376](https://github.com/Shopify/sarama/pull/1376)).
-- Handle and log throttled FetchResponses
- ([1383](https://github.com/Shopify/sarama/pull/1383)).
-- Refactor misspelled word Resouce to Resource
- ([1368](https://github.com/Shopify/sarama/pull/1368)).
-
-#### Version 1.22.1 (2019-04-29)
-
-Improvements:
-- Use zstd 1.3.8
- ([1350](https://github.com/Shopify/sarama/pull/1350)).
-- Add support for SaslHandshakeRequest v1
- ([1354](https://github.com/Shopify/sarama/pull/1354)).
-
-Bug Fixes:
-- Fix V5 MetadataRequest nullable topics array
- ([1353](https://github.com/Shopify/sarama/pull/1353)).
-- Use a different SCRAM client for each broker connection
- ([1349](https://github.com/Shopify/sarama/pull/1349)).
-- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
- ([1344](https://github.com/Shopify/sarama/pull/1344)).
-
-#### Version 1.22.0 (2019-04-09)
-
-New Features:
-- Add Offline Replicas Operation to Client
- ([1318](https://github.com/Shopify/sarama/pull/1318)).
-- Allow using proxy when connecting to broker
- ([1326](https://github.com/Shopify/sarama/pull/1326)).
-- Implement ReadCommitted
- ([1307](https://github.com/Shopify/sarama/pull/1307)).
-- Add support for Kafka 2.2.0
- ([1331](https://github.com/Shopify/sarama/pull/1331)).
-- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanismes
- ([1331](https://github.com/Shopify/sarama/pull/1295)).
-
-Improvements:
-- Unregister all broker metrics on broker stop
- ([1232](https://github.com/Shopify/sarama/pull/1232)).
-- Add SCRAM authentication example
- ([1303](https://github.com/Shopify/sarama/pull/1303)).
-- Add consumergroup examples
- ([1304](https://github.com/Shopify/sarama/pull/1304)).
-- Expose consumer batch size metric
- ([1296](https://github.com/Shopify/sarama/pull/1296)).
-- Add TLS options to console producer and consumer
- ([1300](https://github.com/Shopify/sarama/pull/1300)).
-- Reduce client close bookkeeping
- ([1297](https://github.com/Shopify/sarama/pull/1297)).
-- Satisfy error interface in create responses
- ([1154](https://github.com/Shopify/sarama/pull/1154)).
-- Please lint gods
- ([1346](https://github.com/Shopify/sarama/pull/1346)).
-
-Bug Fixes:
-- Fix multi consumer group instance crash
- ([1338](https://github.com/Shopify/sarama/pull/1338)).
-- Update lz4 to latest version
- ([1347](https://github.com/Shopify/sarama/pull/1347)).
-- Retry ErrNotCoordinatorForConsumer in new consumergroup session
- ([1231](https://github.com/Shopify/sarama/pull/1231)).
-- Fix cleanup error handler
- ([1332](https://github.com/Shopify/sarama/pull/1332)).
-- Fix rate condition in PartitionConsumer
- ([1156](https://github.com/Shopify/sarama/pull/1156)).
-
-#### Version 1.21.0 (2019-02-24)
-
-New Features:
-- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
- ([1236](https://github.com/Shopify/sarama/pull/1236)).
-- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
- ([1178](https://github.com/Shopify/sarama/pull/1178)).
-- Implement SASL/OAUTHBEARER
- ([1240](https://github.com/Shopify/sarama/pull/1240)).
-
-Improvements:
-- Add Go mod support
- ([1282](https://github.com/Shopify/sarama/pull/1282)).
-- Add error codes 73—76
- ([1239](https://github.com/Shopify/sarama/pull/1239)).
-- Add retry backoff function
- ([1160](https://github.com/Shopify/sarama/pull/1160)).
-- Maintain metadata in the producer even when retries are disabled
- ([1189](https://github.com/Shopify/sarama/pull/1189)).
-- Include ReplicaAssignment in ListTopics
- ([1274](https://github.com/Shopify/sarama/pull/1274)).
-- Add producer performance tool
- ([1222](https://github.com/Shopify/sarama/pull/1222)).
-- Add support LogAppend timestamps
- ([1258](https://github.com/Shopify/sarama/pull/1258)).
-
-Bug Fixes:
-- Fix potential deadlock when a heartbeat request fails
- ([1286](https://github.com/Shopify/sarama/pull/1286)).
-- Fix consuming compacted topic
- ([1227](https://github.com/Shopify/sarama/pull/1227)).
-- Set correct Kafka version for DescribeConfigsRequest v1
- ([1277](https://github.com/Shopify/sarama/pull/1277)).
-- Update kafka test version
- ([1273](https://github.com/Shopify/sarama/pull/1273)).
-
-#### Version 1.20.1 (2019-01-10)
-
-New Features:
-- Add optional replica id in offset request
- ([1100](https://github.com/Shopify/sarama/pull/1100)).
-
-Improvements:
-- Implement DescribeConfigs Request + Response v1 & v2
- ([1230](https://github.com/Shopify/sarama/pull/1230)).
-- Reuse compression objects
- ([1185](https://github.com/Shopify/sarama/pull/1185)).
-- Switch from png to svg for GoDoc link in README
- ([1243](https://github.com/Shopify/sarama/pull/1243)).
-- Fix typo in deprecation notice for FetchResponseBlock.Records
- ([1242](https://github.com/Shopify/sarama/pull/1242)).
-- Fix typos in consumer metadata response file
- ([1244](https://github.com/Shopify/sarama/pull/1244)).
-
-Bug Fixes:
-- Revert to individual msg retries for non-idempotent
- ([1203](https://github.com/Shopify/sarama/pull/1203)).
-- Respect MaxMessageBytes limit for uncompressed messages
- ([1141](https://github.com/Shopify/sarama/pull/1141)).
-
-#### Version 1.20.0 (2018-12-10)
-
-New Features:
- - Add support for zstd compression
- ([#1170](https://github.com/Shopify/sarama/pull/1170)).
- - Add support for Idempotent Producer
- ([#1152](https://github.com/Shopify/sarama/pull/1152)).
- - Add support support for Kafka 2.1.0
- ([#1229](https://github.com/Shopify/sarama/pull/1229)).
- - Add support support for OffsetCommit request/response pairs versions v1 to v5
- ([#1201](https://github.com/Shopify/sarama/pull/1201)).
- - Add support support for OffsetFetch request/response pair up to version v5
- ([#1198](https://github.com/Shopify/sarama/pull/1198)).
-
-Improvements:
- - Export broker's Rack setting
- ([#1173](https://github.com/Shopify/sarama/pull/1173)).
- - Always use latest patch version of Go on CI
- ([#1202](https://github.com/Shopify/sarama/pull/1202)).
- - Add error codes 61 to 72
- ([#1195](https://github.com/Shopify/sarama/pull/1195)).
-
-Bug Fixes:
- - Fix build without cgo
- ([#1182](https://github.com/Shopify/sarama/pull/1182)).
- - Fix go vet suggestion in consumer group file
- ([#1209](https://github.com/Shopify/sarama/pull/1209)).
- - Fix typos in code and comments
- ([#1228](https://github.com/Shopify/sarama/pull/1228)).
-
-#### Version 1.19.0 (2018-09-27)
-
-New Features:
- - Implement a higher-level consumer group
- ([#1099](https://github.com/Shopify/sarama/pull/1099)).
-
-Improvements:
- - Add support for Go 1.11
- ([#1176](https://github.com/Shopify/sarama/pull/1176)).
-
-Bug Fixes:
- - Fix encoding of `MetadataResponse` with version 2 and higher
- ([#1174](https://github.com/Shopify/sarama/pull/1174)).
- - Fix race condition in mock async producer
- ([#1174](https://github.com/Shopify/sarama/pull/1174)).
-
-#### Version 1.18.0 (2018-09-07)
-
-New Features:
- - Make `Partitioner.RequiresConsistency` vary per-message
- ([#1112](https://github.com/Shopify/sarama/pull/1112)).
- - Add customizable partitioner
- ([#1118](https://github.com/Shopify/sarama/pull/1118)).
- - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
- `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
- ([#1055](https://github.com/Shopify/sarama/pull/1055)).
-
-Improvements:
- - Add support for Kafka 2.0.0
- ([#1149](https://github.com/Shopify/sarama/pull/1149)).
- - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
- ([#1123](https://github.com/Shopify/sarama/pull/1123)).
- - Simpler offset management
- ([#1127](https://github.com/Shopify/sarama/pull/1127)).
-
-Bug Fixes:
- - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
- ([#1110](https://github.com/Shopify/sarama/pull/1110)).
- - Fix consumer block when response did not contain all the
- expected topic/partition blocks
- ([#1086](https://github.com/Shopify/sarama/pull/1086)).
- - Fix consumer block when response contains only constrol messages
- ([#1115](https://github.com/Shopify/sarama/pull/1115)).
- - Add timeout config for ClusterAdmin requests
- ([#1142](https://github.com/Shopify/sarama/pull/1142)).
- - Add version check when producing message with headers
- ([#1117](https://github.com/Shopify/sarama/pull/1117)).
- - Fix `MetadataRequest` for empty list of topics
- ([#1132](https://github.com/Shopify/sarama/pull/1132)).
- - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
- ([#1125](https://github.com/Shopify/sarama/pull/1125)).
-
-#### Version 1.17.0 (2018-05-30)
-
-New Features:
- - Add support for gzip compression levels
- ([#1044](https://github.com/Shopify/sarama/pull/1044)).
- - Add support for Metadata request/response pairs versions v1 to v5
- ([#1047](https://github.com/Shopify/sarama/pull/1047),
- [#1069](https://github.com/Shopify/sarama/pull/1069)).
- - Add versioning to JoinGroup request/response pairs
- ([#1098](https://github.com/Shopify/sarama/pull/1098))
- - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
- ([#1065](https://github.com/Shopify/sarama/pull/1065),
- [#1096](https://github.com/Shopify/sarama/pull/1096),
- [#1027](https://github.com/Shopify/sarama/pull/1027)).
- - Add `Controller()` method to Client interface
- ([#1063](https://github.com/Shopify/sarama/pull/1063)).
-
-Improvements:
- - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
- ([#1010](https://github.com/Shopify/sarama/pull/1010)).
- - Expose missing protocol parts: `msgSet` and `recordBatch`
- ([#1049](https://github.com/Shopify/sarama/pull/1049)).
- - Add support for v1 DeleteTopics Request
- ([#1052](https://github.com/Shopify/sarama/pull/1052)).
- - Add support for Go 1.10
- ([#1064](https://github.com/Shopify/sarama/pull/1064)).
- - Claim support for Kafka 1.1.0
- ([#1073](https://github.com/Shopify/sarama/pull/1073)).
-
-Bug Fixes:
- - Fix FindCoordinatorResponse.encode to allow nil Coordinator
- ([#1050](https://github.com/Shopify/sarama/pull/1050),
- [#1051](https://github.com/Shopify/sarama/pull/1051)).
- - Clear all metadata when we have the latest topic info
- ([#1033](https://github.com/Shopify/sarama/pull/1033)).
- - Make `PartitionConsumer.Close` idempotent
- ([#1092](https://github.com/Shopify/sarama/pull/1092)).
-
-#### Version 1.16.0 (2018-02-12)
-
-New Features:
- - Add support for the Create/Delete Topics request/response pairs
- ([#1007](https://github.com/Shopify/sarama/pull/1007),
- [#1008](https://github.com/Shopify/sarama/pull/1008)).
- - Add support for the Describe/Create/Delete ACL request/response pairs
- ([#1009](https://github.com/Shopify/sarama/pull/1009)).
- - Add support for the five transaction-related request/response pairs
- ([#1016](https://github.com/Shopify/sarama/pull/1016)).
-
-Improvements:
- - Permit setting version on mock producer responses
- ([#999](https://github.com/Shopify/sarama/pull/999)).
- - Add `NewMockBrokerListener` helper for testing TLS connections
- ([#1019](https://github.com/Shopify/sarama/pull/1019)).
- - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
- which results in much higher throughput in most cases
- ([#1024](https://github.com/Shopify/sarama/pull/1024)).
- - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
- reduce CPU and memory usage when processing many partitions
- ([#1028](https://github.com/Shopify/sarama/pull/1028)).
- - Assign relative offsets to messages in the producer to save the brokers a
- recompression pass
- ([#1002](https://github.com/Shopify/sarama/pull/1002),
- [#1015](https://github.com/Shopify/sarama/pull/1015)).
-
-Bug Fixes:
- - Fix producing uncompressed batches with the new protocol format
- ([#1032](https://github.com/Shopify/sarama/issues/1032)).
- - Fix consuming compacted topics with the new protocol format
- ([#1005](https://github.com/Shopify/sarama/issues/1005)).
- - Fix consuming topics with a mix of protocol formats
- ([#1021](https://github.com/Shopify/sarama/issues/1021)).
- - Fix consuming when the broker includes multiple batches in a single response
- ([#1022](https://github.com/Shopify/sarama/issues/1022)).
- - Fix detection of `PartialTrailingMessage` when the partial message was
- truncated before the magic value indicating its version
- ([#1030](https://github.com/Shopify/sarama/pull/1030)).
- - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
- ([#1035](https://github.com/Shopify/sarama/pull/1035)).
-
-#### Version 1.15.0 (2017-12-08)
-
-New Features:
- - Claim official support for Kafka 1.0, though it did already work
- ([#984](https://github.com/Shopify/sarama/pull/984)).
- - Helper methods for Kafka version numbers to/from strings
- ([#989](https://github.com/Shopify/sarama/pull/989)).
- - Implement CreatePartitions request/response
- ([#985](https://github.com/Shopify/sarama/pull/985)).
-
-Improvements:
- - Add error codes 45-60
- ([#986](https://github.com/Shopify/sarama/issues/986)).
-
-Bug Fixes:
- - Fix slow consuming for certain Kafka 0.11/1.0 configurations
- ([#982](https://github.com/Shopify/sarama/pull/982)).
- - Correctly determine when a FetchResponse contains the new message format
- ([#990](https://github.com/Shopify/sarama/pull/990)).
- - Fix producing with multiple headers
- ([#996](https://github.com/Shopify/sarama/pull/996)).
- - Fix handling of truncated record batches
- ([#998](https://github.com/Shopify/sarama/pull/998)).
- - Fix leaking metrics when closing brokers
- ([#991](https://github.com/Shopify/sarama/pull/991)).
-
-#### Version 1.14.0 (2017-11-13)
-
-New Features:
- - Add support for the new Kafka 0.11 record-batch format, including the wire
- protocol and the necessary behavioural changes in the producer and consumer.
- Transactions and idempotency are not yet supported, but producing and
- consuming should work with all the existing bells and whistles (batching,
- compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
- of Arista Networks for this work. Part of
- ([#901](https://github.com/Shopify/sarama/issues/901)).
-
-Bug Fixes:
- - Fix encoding of ProduceResponse versions in test
- ([#970](https://github.com/Shopify/sarama/pull/970)).
- - Return partial replicas list when we have it
- ([#975](https://github.com/Shopify/sarama/pull/975)).
-
-#### Version 1.13.0 (2017-10-04)
-
-New Features:
- - Support for FetchRequest version 3
- ([#905](https://github.com/Shopify/sarama/pull/905)).
- - Permit setting version on mock FetchResponses
- ([#939](https://github.com/Shopify/sarama/pull/939)).
- - Add a configuration option to support storing only minimal metadata for
- extremely large clusters
- ([#937](https://github.com/Shopify/sarama/pull/937)).
- - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
- ([#932](https://github.com/Shopify/sarama/pull/932)).
-
-Improvements:
- - Provide the block-level timestamp when consuming compressed messages
- ([#885](https://github.com/Shopify/sarama/issues/885)).
- - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
- by the broker, which can be meaningful
- ([#930](https://github.com/Shopify/sarama/pull/930)).
- - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
- variance in the actual timeout
- ([#933](https://github.com/Shopify/sarama/pull/933)).
-
-Bug Fixes:
- - Gracefully handle messages with negative timestamps
- ([#907](https://github.com/Shopify/sarama/pull/907)).
- - Raise a proper error when encountering an unknown message version
- ([#940](https://github.com/Shopify/sarama/pull/940)).
-
-#### Version 1.12.0 (2017-05-08)
-
-New Features:
- - Added support for the `ApiVersions` request and response pair, and Kafka
- version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
- that you still need to specify the Kafka version in the Sarama configuration
- for the time being.
- - Added a `Brokers` method to the Client which returns the complete set of
- active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- - Added an `InSyncReplicas` method to the Client which returns the set of all
- in-sync broker IDs for the given partition, now that the Kafka versions for
- which this was misleading are no longer in our supported set
- ([#872](https://github.com/Shopify/sarama/pull/872)).
- - Added a `NewCustomHashPartitioner` method which allows constructing a hash
- partitioner with a custom hash method in case the default (FNV-1a) is not
- suitable
- ([#837](https://github.com/Shopify/sarama/pull/837),
- [#841](https://github.com/Shopify/sarama/pull/841)).
-
-Improvements:
- - Recognize more Kafka error codes
- ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-Bug Fixes:
- - Fix an issue where decoding a malformed FetchRequest would not return the
- correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- - Respect ordering of group protocols in JoinGroupRequests. This fix is
- transparent if you're using the `AddGroupProtocol` or
- `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
- the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
- ([#812](https://github.com/Shopify/sarama/issues/812)).
- - Fix an alignment-related issue with atomics on 32-bit architectures
- ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-#### Version 1.11.0 (2016-12-20)
-
-_Important:_ As of Sarama 1.11 it is necessary to set the config value of
-`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
-versions would silently override this value when instantiating a SyncProducer
-which led to unexpected values and data races.
-
-New Features:
- - Metrics! Thanks to Sébastien Launay for all his work on this feature
- ([#701](https://github.com/Shopify/sarama/pull/701),
- [#746](https://github.com/Shopify/sarama/pull/746),
- [#766](https://github.com/Shopify/sarama/pull/766)).
- - Add support for LZ4 compression
- ([#786](https://github.com/Shopify/sarama/pull/786)).
- - Add support for ListOffsetRequest v1 and Kafka 0.10.1
- ([#775](https://github.com/Shopify/sarama/pull/775)).
- - Added a `HighWaterMarks` method to the Consumer which aggregates the
- `HighWaterMarkOffset` values of its child topic/partitions
- ([#769](https://github.com/Shopify/sarama/pull/769)).
-
-Bug Fixes:
- - Fixed producing when using timestamps, compression and Kafka 0.10
- ([#759](https://github.com/Shopify/sarama/pull/759)).
- - Added missing decoder methods to DescribeGroups response
- ([#756](https://github.com/Shopify/sarama/pull/756)).
- - Fix producer shutdown when `Return.Errors` is disabled
- ([#787](https://github.com/Shopify/sarama/pull/787)).
- - Don't mutate configuration in SyncProducer
- ([#790](https://github.com/Shopify/sarama/pull/790)).
- - Fix crash on SASL initialization failure
- ([#795](https://github.com/Shopify/sarama/pull/795)).
-
-#### Version 1.10.1 (2016-08-30)
-
-Bug Fixes:
- - Fix the documentation for `HashPartitioner` which was incorrect
- ([#717](https://github.com/Shopify/sarama/pull/717)).
- - Permit client creation even when it is limited by ACLs
- ([#722](https://github.com/Shopify/sarama/pull/722)).
- - Several fixes to the consumer timer optimization code, regressions introduced
- in v1.10.0. Go's timers are finicky
- ([#730](https://github.com/Shopify/sarama/pull/730),
- [#733](https://github.com/Shopify/sarama/pull/733),
- [#734](https://github.com/Shopify/sarama/pull/734)).
- - Handle consuming compressed relative offsets with Kafka 0.10
- ([#735](https://github.com/Shopify/sarama/pull/735)).
-
-#### Version 1.10.0 (2016-08-02)
-
-_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
-Kafka you are running against (via the `config.Version` value) in order to use
-features that may not be compatible with old Kafka versions. If you don't
-specify this value it will default to 0.8.2 (the minimum supported), and trying
-to use more recent features (like the offset manager) will fail with an error.
-
-_Also:_ The offset-manager's behaviour has been changed to match the upstream
-java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
-[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
-offset-manager, please ensure that you are committing one *greater* than the
-last consumed message offset or else you may end up consuming duplicate
-messages.
-
-New Features:
- - Support for Kafka 0.10
- ([#672](https://github.com/Shopify/sarama/pull/672),
- [#678](https://github.com/Shopify/sarama/pull/678),
- [#681](https://github.com/Shopify/sarama/pull/681), and others).
- - Support for configuring the target Kafka version
- ([#676](https://github.com/Shopify/sarama/pull/676)).
- - Batch producing support in the SyncProducer
- ([#677](https://github.com/Shopify/sarama/pull/677)).
- - Extend producer mock to allow setting expectations on message contents
- ([#667](https://github.com/Shopify/sarama/pull/667)).
-
-Improvements:
- - Support `nil` compressed messages for deleting in compacted topics
- ([#634](https://github.com/Shopify/sarama/pull/634)).
- - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
- misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- - Re-use consumer expiry timers, removing one allocation per consumed message
- ([#707](https://github.com/Shopify/sarama/pull/707)).
-
-Bug Fixes:
- - Actually default the client ID to "sarama" like we say we do
- ([#664](https://github.com/Shopify/sarama/pull/664)).
- - Fix a rare issue where `Client.Leader` could return the wrong error
- ([#685](https://github.com/Shopify/sarama/pull/685)).
- - Fix a possible tight loop in the consumer
- ([#693](https://github.com/Shopify/sarama/pull/693)).
- - Match upstream's offset-tracking behaviour
- ([#705](https://github.com/Shopify/sarama/pull/705)).
- - Report UnknownTopicOrPartition errors from the offset manager
- ([#706](https://github.com/Shopify/sarama/pull/706)).
- - Fix possible negative partition value from the HashPartitioner
- ([#709](https://github.com/Shopify/sarama/pull/709)).
-
-#### Version 1.9.0 (2016-05-16)
-
-New Features:
- - Add support for custom offset manager retention durations
- ([#602](https://github.com/Shopify/sarama/pull/602)).
- - Publish low-level mocks to enable testing of third-party producer/consumer
- implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- - Declare support for Golang 1.6
- ([#611](https://github.com/Shopify/sarama/pull/611)).
- - Support for SASL plain-text auth
- ([#648](https://github.com/Shopify/sarama/pull/648)).
-
-Improvements:
- - Simplified broker locking scheme slightly
- ([#604](https://github.com/Shopify/sarama/pull/604)).
- - Documentation cleanup
- ([#605](https://github.com/Shopify/sarama/pull/605),
- [#621](https://github.com/Shopify/sarama/pull/621),
- [#654](https://github.com/Shopify/sarama/pull/654)).
-
-Bug Fixes:
- - Fix race condition shutting down the OffsetManager
- ([#658](https://github.com/Shopify/sarama/pull/658)).
-
-#### Version 1.8.0 (2016-02-01)
-
-New Features:
- - Full support for Kafka 0.9:
- - All protocol messages and fields
- ([#586](https://github.com/Shopify/sarama/pull/586),
- [#588](https://github.com/Shopify/sarama/pull/588),
- [#590](https://github.com/Shopify/sarama/pull/590)).
- - Verified that TLS support works
- ([#581](https://github.com/Shopify/sarama/pull/581)).
- - Fixed the OffsetManager compatibility
- ([#585](https://github.com/Shopify/sarama/pull/585)).
-
-Improvements:
- - Optimize for fewer system calls when reading from the network
- ([#584](https://github.com/Shopify/sarama/pull/584)).
- - Automatically retry `InvalidMessage` errors to match upstream behaviour
- ([#589](https://github.com/Shopify/sarama/pull/589)).
-
-#### Version 1.7.0 (2015-12-11)
-
-New Features:
- - Preliminary support for Kafka 0.9
- ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
- caveats:
- - Protocol-layer support is mostly in place
- ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
- renamed some messages and fields, which we did not in order to preserve API
- compatibility.
- - The producer and consumer work against 0.9, but the offset manager does
- not ([#573](https://github.com/Shopify/sarama/pull/573)).
- - TLS support may or may not work
- ([#581](https://github.com/Shopify/sarama/pull/581)).
-
-Improvements:
- - Don't wait for request timeouts on dead brokers, greatly speeding recovery
- when the TCP connection is left hanging
- ([#548](https://github.com/Shopify/sarama/pull/548)).
- - Refactored part of the producer. The new version provides a much more elegant
- solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
- slightly more efficient, and much more precise in calculating batch sizes
- when compression is used
- ([#549](https://github.com/Shopify/sarama/pull/549),
- [#550](https://github.com/Shopify/sarama/pull/550),
- [#551](https://github.com/Shopify/sarama/pull/551)).
-
-Bug Fixes:
- - Fix race condition in consumer test mock
- ([#553](https://github.com/Shopify/sarama/pull/553)).
-
-#### Version 1.6.1 (2015-09-25)
-
-Bug Fixes:
- - Fix panic that could occur if a user-supplied message value failed to encode
- ([#449](https://github.com/Shopify/sarama/pull/449)).
-
-#### Version 1.6.0 (2015-09-04)
-
-New Features:
- - Implementation of a consumer offset manager using the APIs introduced in
- Kafka 0.8.2. The API is designed mainly for integration into a future
- high-level consumer, not for direct use, although it is *possible* to use it
- directly.
- ([#461](https://github.com/Shopify/sarama/pull/461)).
-
-Improvements:
- - CRC32 calculation is much faster on machines with SSE4.2 instructions,
- removing a major hotspot from most profiles
- ([#255](https://github.com/Shopify/sarama/pull/255)).
-
-Bug Fixes:
- - Make protocol decoding more robust against some malformed packets generated
- by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
- [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
- ([#528](https://github.com/Shopify/sarama/pull/528)).
- - Fix a potential race condition panic in the consumer on shutdown
- ([#529](https://github.com/Shopify/sarama/pull/529)).
-
-#### Version 1.5.0 (2015-08-17)
-
-New Features:
- - TLS-encrypted network connections are now supported. This feature is subject
- to change when Kafka releases built-in TLS support, but for now this is
- enough to work with TLS-terminating proxies
- ([#154](https://github.com/Shopify/sarama/pull/154)).
-
-Improvements:
- - The consumer will not block if a single partition is not drained by the user;
- all other partitions will continue to consume normally
- ([#485](https://github.com/Shopify/sarama/pull/485)).
- - Formatting of error strings has been much improved
- ([#495](https://github.com/Shopify/sarama/pull/495)).
- - Internal refactoring of the producer for code cleanliness and to enable
- future work ([#300](https://github.com/Shopify/sarama/pull/300)).
-
-Bug Fixes:
- - Fix a potential deadlock in the consumer on shutdown
- ([#475](https://github.com/Shopify/sarama/pull/475)).
-
-#### Version 1.4.3 (2015-07-21)
-
-Bug Fixes:
- - Don't include the partitioner in the producer's "fetch partitions"
- circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- - Don't retry messages until the broker is closed when abandoning a broker in
- the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- - Update the import path for snappy-go, it has moved again and the API has
- changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
-
-#### Version 1.4.2 (2015-05-27)
-
-Bug Fixes:
- - Update the import path for snappy-go, it has moved from google code to github
- ([#456](https://github.com/Shopify/sarama/pull/456)).
-
-#### Version 1.4.1 (2015-05-25)
-
-Improvements:
- - Optimizations when decoding snappy messages, thanks to John Potocny
- ([#446](https://github.com/Shopify/sarama/pull/446)).
-
-Bug Fixes:
- - Fix hypothetical race conditions on producer shutdown
- ([#450](https://github.com/Shopify/sarama/pull/450),
- [#451](https://github.com/Shopify/sarama/pull/451)).
-
-#### Version 1.4.0 (2015-05-01)
-
-New Features:
- - The consumer now implements `Topics()` and `Partitions()` methods to enable
- users to dynamically choose what topics/partitions to consume without
- instantiating a full client
- ([#431](https://github.com/Shopify/sarama/pull/431)).
- - The partition-consumer now exposes the high water mark offset value returned
- by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- - Added a `kafka-console-consumer` tool capable of handling multiple
- partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
- ([#439](https://github.com/Shopify/sarama/pull/439),
- [#442](https://github.com/Shopify/sarama/pull/442)).
-
-Improvements:
- - The producer's logging during retry scenarios is more consistent, more
- useful, and slightly less verbose
- ([#429](https://github.com/Shopify/sarama/pull/429)).
- - The client now shuffles its initial list of seed brokers in order to prevent
- thundering herd on the first broker in the list
- ([#441](https://github.com/Shopify/sarama/pull/441)).
-
-Bug Fixes:
- - The producer now correctly manages its state if retries occur when it is
- shutting down, fixing several instances of confusing behaviour and at least
- one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- - The consumer now handles messages for different partitions asynchronously,
- making it much more resilient to specific user code ordering
- ([#325](https://github.com/Shopify/sarama/pull/325)).
-
-#### Version 1.3.0 (2015-04-16)
-
-New Features:
- - The client now tracks consumer group coordinators using
- ConsumerMetadataRequests similar to how it tracks partition leadership using
- regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
- This adds two methods to the client API:
- - `Coordinator(consumerGroup string) (*Broker, error)`
- - `RefreshCoordinator(consumerGroup string) error`
-
-Improvements:
- - ConsumerMetadataResponses now automatically create a Broker object out of the
- ID/address/port combination for the Coordinator; accessing the fields
- individually has been deprecated
- ([#413](https://github.com/Shopify/sarama/pull/413)).
- - Much improved handling of `OffsetOutOfRange` errors in the consumer.
- Consumers will fail to start if the provided offset is out of range
- ([#418](https://github.com/Shopify/sarama/pull/418))
- and they will automatically shut down if the offset falls out of range
- ([#424](https://github.com/Shopify/sarama/pull/424)).
- - Small performance improvement in encoding and decoding protocol messages
- ([#427](https://github.com/Shopify/sarama/pull/427)).
-
-Bug Fixes:
- - Fix a rare race condition in the client's background metadata refresher if
- it happens to be activated while the client is being closed
- ([#422](https://github.com/Shopify/sarama/pull/422)).
-
-#### Version 1.2.0 (2015-04-07)
-
-Improvements:
- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
- ([#389](https://github.com/Shopify/sarama/pull/389)).
- - The producer is now somewhat more memory-efficient during and after retrying
- messages due to an improved queue implementation
- ([#396](https://github.com/Shopify/sarama/pull/396)).
- - The consumer produces much more useful logging output when leadership
- changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- - The client's `GetOffset` method will now automatically refresh metadata and
- retry once in the event of stale information or similar
- ([#394](https://github.com/Shopify/sarama/pull/394)).
- - Broker connections now have support for using TCP keepalives
- ([#407](https://github.com/Shopify/sarama/issues/407)).
-
-Bug Fixes:
- - The OffsetCommitRequest message now correctly implements all three possible
- API versions ([#390](https://github.com/Shopify/sarama/pull/390),
- [#400](https://github.com/Shopify/sarama/pull/400)).
-
-#### Version 1.1.0 (2015-03-20)
-
-Improvements:
- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
- broken topics don't choke throughput
- ([#373](https://github.com/Shopify/sarama/pull/373)).
-
-Bug Fixes:
- - Fix the producer's internal reference counting in certain unusual scenarios
- ([#367](https://github.com/Shopify/sarama/pull/367)).
- - Fix the consumer's internal reference counting in certain unusual scenarios
- ([#369](https://github.com/Shopify/sarama/pull/369)).
- - Fix a condition where the producer's internal control messages could have
- gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- - Fix an issue where invalid partition lists would be cached when asking for
- metadata for a non-existant topic ([#372](https://github.com/Shopify/sarama/pull/372)).
-
-
-#### Version 1.0.0 (2015-03-17)
-
-Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
-
-- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make depedency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
-- For most uses cases, it is no longer necessary to open a `Client`; this will be done for you.
-- All the configuration values have been unified in the `Config` struct.
-- Much improved test suite.
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/LICENSE b/vendor/gopkg.in/Shopify/sarama.v1/LICENSE
deleted file mode 100644
index d2bf435..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013 Shopify
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/Makefile b/vendor/gopkg.in/Shopify/sarama.v1/Makefile
deleted file mode 100644
index 360b220..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-export GO111MODULE=on
-
-default: fmt vet errcheck test lint
-
-# Taken from https://github.com/codecov/example-go#caveat-multiple-files
-.PHONY: test
-test:
- echo "" > coverage.txt
- for d in `go list ./...`; do \
- go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
- if [ -f profile.out ]; then \
- cat profile.out >> coverage.txt; \
- rm profile.out; \
- fi \
- done
-
-GOLINT := $(shell command -v golint)
-
-.PHONY: lint
-lint:
-ifndef GOLINT
- go get golang.org/x/lint/golint
-endif
- go list ./... | xargs golint
-
-.PHONY: vet
-vet:
- go vet ./...
-
-ERRCHECK := $(shell command -v errcheck)
-# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
-.PHONY: errcheck
-errcheck:
-ifndef ERRCHECK
- go get github.com/kisielk/errcheck
-endif
- errcheck -ignorepkg fmt github.com/Shopify/sarama/...
-
-.PHONY: fmt
-fmt:
- @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
-
-.PHONY : install_dependencies
-install_dependencies: get
-
-.PHONY: get
-get:
- go get -t -v ./...
-
-.PHONY: clean
-clean:
- go clean ./...
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/README.md b/vendor/gopkg.in/Shopify/sarama.v1/README.md
deleted file mode 100644
index 4cd736b..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-sarama
-======
-
-[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
-[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
-[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
-
-Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
-
-### Getting started
-
-- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
-- Mocks for testing are available in the [mocks](./mocks) subpackage.
-- The [examples](./examples) directory contains more elaborate example applications.
-- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
-
-You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
-
-### Compatibility and API stability
-
-Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
-the two latest stable releases of Kafka and Go, and we provide a two month
-grace period for older releases. This means we currently officially support
-Go 1.11 through 1.12, and Kafka 2.0 through 2.3, although older releases are
-still likely to work.
-
-Sarama follows semantic versioning and provides API stability via the gopkg.in service.
-You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
-A changelog is available [here](CHANGELOG.md).
-
-### Contributing
-
-* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
-* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
- technical and design details.
-* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
- contains a wealth of useful information.
-* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-* If you have any questions, just ask!
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile b/vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile
deleted file mode 100644
index f4b848a..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/Vagrantfile
+++ /dev/null
@@ -1,20 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
-MEMORY = 3072
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "ubuntu/trusty64"
-
- config.vm.provision :shell, path: "vagrant/provision.sh"
-
- config.vm.network "private_network", ip: "192.168.100.67"
-
- config.vm.provider "virtualbox" do |v|
- v.memory = MEMORY
- end
-end
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_bindings.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_bindings.go
deleted file mode 100644
index 50b689d..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_bindings.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package sarama
-
-//Resource holds information about acl resource type
-type Resource struct {
- ResourceType AclResourceType
- ResourceName string
- ResourcePatternType AclResourcePatternType
-}
-
-func (r *Resource) encode(pe packetEncoder, version int16) error {
- pe.putInt8(int8(r.ResourceType))
-
- if err := pe.putString(r.ResourceName); err != nil {
- return err
- }
-
- if version == 1 {
- if r.ResourcePatternType == AclPatternUnknown {
- Logger.Print("Cannot encode an unknown resource pattern type, using Literal instead")
- r.ResourcePatternType = AclPatternLiteral
- }
- pe.putInt8(int8(r.ResourcePatternType))
- }
-
- return nil
-}
-
-func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
- resourceType, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.ResourceType = AclResourceType(resourceType)
-
- if r.ResourceName, err = pd.getString(); err != nil {
- return err
- }
- if version == 1 {
- pattern, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.ResourcePatternType = AclResourcePatternType(pattern)
- }
-
- return nil
-}
-
-//Acl holds information about acl type
-type Acl struct {
- Principal string
- Host string
- Operation AclOperation
- PermissionType AclPermissionType
-}
-
-func (a *Acl) encode(pe packetEncoder) error {
- if err := pe.putString(a.Principal); err != nil {
- return err
- }
-
- if err := pe.putString(a.Host); err != nil {
- return err
- }
-
- pe.putInt8(int8(a.Operation))
- pe.putInt8(int8(a.PermissionType))
-
- return nil
-}
-
-func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
- if a.Principal, err = pd.getString(); err != nil {
- return err
- }
-
- if a.Host, err = pd.getString(); err != nil {
- return err
- }
-
- operation, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Operation = AclOperation(operation)
-
- permissionType, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.PermissionType = AclPermissionType(permissionType)
-
- return nil
-}
-
-//ResourceAcls is an acl resource type
-type ResourceAcls struct {
- Resource
- Acls []*Acl
-}
-
-func (r *ResourceAcls) encode(pe packetEncoder, version int16) error {
- if err := r.Resource.encode(pe, version); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.Acls)); err != nil {
- return err
- }
- for _, acl := range r.Acls {
- if err := acl.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
- if err := r.Resource.decode(pd, version); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Acls = make([]*Acl, n)
- for i := 0; i < n; i++ {
- r.Acls[i] = new(Acl)
- if err := r.Acls[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_create_request.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_create_request.go
deleted file mode 100644
index da1cdef..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_create_request.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-//CreateAclsRequest is an acl creation request
-type CreateAclsRequest struct {
- Version int16
- AclCreations []*AclCreation
-}
-
-func (c *CreateAclsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
- return err
- }
-
- for _, aclCreation := range c.AclCreations {
- if err := aclCreation.encode(pe, c.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
- c.Version = version
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.AclCreations = make([]*AclCreation, n)
-
- for i := 0; i < n; i++ {
- c.AclCreations[i] = new(AclCreation)
- if err := c.AclCreations[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsRequest) key() int16 {
- return 30
-}
-
-func (c *CreateAclsRequest) version() int16 {
- return c.Version
-}
-
-func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
- switch c.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
-
-//AclCreation is a wrapper around Resource and Acl type
-type AclCreation struct {
- Resource
- Acl
-}
-
-func (a *AclCreation) encode(pe packetEncoder, version int16) error {
- if err := a.Resource.encode(pe, version); err != nil {
- return err
- }
- if err := a.Acl.encode(pe); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
- if err := a.Resource.decode(pd, version); err != nil {
- return err
- }
- if err := a.Acl.decode(pd, version); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_create_response.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_create_response.go
deleted file mode 100644
index f5a5e9a..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_create_response.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package sarama
-
-import "time"
-
-//CreateAclsResponse is a an acl reponse creation type
-type CreateAclsResponse struct {
- ThrottleTime time.Duration
- AclCreationResponses []*AclCreationResponse
-}
-
-func (c *CreateAclsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
- return err
- }
-
- for _, aclCreationResponse := range c.AclCreationResponses {
- if err := aclCreationResponse.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.AclCreationResponses = make([]*AclCreationResponse, n)
- for i := 0; i < n; i++ {
- c.AclCreationResponses[i] = new(AclCreationResponse)
- if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsResponse) key() int16 {
- return 30
-}
-
-func (c *CreateAclsResponse) version() int16 {
- return 0
-}
-
-func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-//AclCreationResponse is an acl creation response type
-type AclCreationResponse struct {
- Err KError
- ErrMsg *string
-}
-
-func (a *AclCreationResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(a.Err))
-
- if err := pe.putNullableString(a.ErrMsg); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- a.Err = KError(kerr)
-
- if a.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_delete_request.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_delete_request.go
deleted file mode 100644
index 15908ea..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_delete_request.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package sarama
-
-//DeleteAclsRequest is a delete acl request
-type DeleteAclsRequest struct {
- Version int
- Filters []*AclFilter
-}
-
-func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(d.Filters)); err != nil {
- return err
- }
-
- for _, filter := range d.Filters {
- filter.Version = d.Version
- if err := filter.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
- d.Version = int(version)
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- d.Filters = make([]*AclFilter, n)
- for i := 0; i < n; i++ {
- d.Filters[i] = new(AclFilter)
- d.Filters[i].Version = int(version)
- if err := d.Filters[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsRequest) key() int16 {
- return 31
-}
-
-func (d *DeleteAclsRequest) version() int16 {
- return int16(d.Version)
-}
-
-func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_delete_response.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_delete_response.go
deleted file mode 100644
index 6529565..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_delete_response.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package sarama
-
-import "time"
-
-//DeleteAclsResponse is a delete acl response
-type DeleteAclsResponse struct {
- Version int16
- ThrottleTime time.Duration
- FilterResponses []*FilterResponse
-}
-
-func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
- return err
- }
-
- for _, filterResponse := range d.FilterResponses {
- if err := filterResponse.encode(pe, d.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- d.FilterResponses = make([]*FilterResponse, n)
-
- for i := 0; i < n; i++ {
- d.FilterResponses[i] = new(FilterResponse)
- if err := d.FilterResponses[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsResponse) key() int16 {
- return 31
-}
-
-func (d *DeleteAclsResponse) version() int16 {
- return int16(d.Version)
-}
-
-func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-//FilterResponse is a filter response type
-type FilterResponse struct {
- Err KError
- ErrMsg *string
- MatchingAcls []*MatchingAcl
-}
-
-func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
- pe.putInt16(int16(f.Err))
- if err := pe.putNullableString(f.ErrMsg); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
- return err
- }
- for _, matchingAcl := range f.MatchingAcls {
- if err := matchingAcl.encode(pe, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- f.Err = KError(kerr)
-
- if f.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- f.MatchingAcls = make([]*MatchingAcl, n)
- for i := 0; i < n; i++ {
- f.MatchingAcls[i] = new(MatchingAcl)
- if err := f.MatchingAcls[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-//MatchingAcl is a matching acl type
-type MatchingAcl struct {
- Err KError
- ErrMsg *string
- Resource
- Acl
-}
-
-func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
- pe.putInt16(int16(m.Err))
- if err := pe.putNullableString(m.ErrMsg); err != nil {
- return err
- }
-
- if err := m.Resource.encode(pe, version); err != nil {
- return err
- }
-
- if err := m.Acl.encode(pe); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- m.Err = KError(kerr)
-
- if m.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- if err := m.Resource.decode(pd, version); err != nil {
- return err
- }
-
- if err := m.Acl.decode(pd, version); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_describe_request.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_describe_request.go
deleted file mode 100644
index 5222d46..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_describe_request.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package sarama
-
-//DescribeAclsRequest is a secribe acl request type
-type DescribeAclsRequest struct {
- Version int
- AclFilter
-}
-
-func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
- d.AclFilter.Version = d.Version
- return d.AclFilter.encode(pe)
-}
-
-func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
- d.Version = int(version)
- d.AclFilter.Version = int(version)
- return d.AclFilter.decode(pd, version)
-}
-
-func (d *DescribeAclsRequest) key() int16 {
- return 29
-}
-
-func (d *DescribeAclsRequest) version() int16 {
- return int16(d.Version)
-}
-
-func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_describe_response.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_describe_response.go
deleted file mode 100644
index 12126e5..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_describe_response.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-import "time"
-
-//DescribeAclsResponse is a describe acl response type
-type DescribeAclsResponse struct {
- Version int16
- ThrottleTime time.Duration
- Err KError
- ErrMsg *string
- ResourceAcls []*ResourceAcls
-}
-
-func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(d.Err))
-
- if err := pe.putNullableString(d.ErrMsg); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
- return err
- }
-
- for _, resourceAcl := range d.ResourceAcls {
- if err := resourceAcl.encode(pe, d.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- d.Err = KError(kerr)
-
- errmsg, err := pd.getString()
- if err != nil {
- return err
- }
- if errmsg != "" {
- d.ErrMsg = &errmsg
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- d.ResourceAcls = make([]*ResourceAcls, n)
-
- for i := 0; i < n; i++ {
- d.ResourceAcls[i] = new(ResourceAcls)
- if err := d.ResourceAcls[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DescribeAclsResponse) key() int16 {
- return 29
-}
-
-func (d *DescribeAclsResponse) version() int16 {
- return int16(d.Version)
-}
-
-func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_filter.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_filter.go
deleted file mode 100644
index fad5558..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_filter.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package sarama
-
-type AclFilter struct {
- Version int
- ResourceType AclResourceType
- ResourceName *string
- ResourcePatternTypeFilter AclResourcePatternType
- Principal *string
- Host *string
- Operation AclOperation
- PermissionType AclPermissionType
-}
-
-func (a *AclFilter) encode(pe packetEncoder) error {
- pe.putInt8(int8(a.ResourceType))
- if err := pe.putNullableString(a.ResourceName); err != nil {
- return err
- }
-
- if a.Version == 1 {
- pe.putInt8(int8(a.ResourcePatternTypeFilter))
- }
-
- if err := pe.putNullableString(a.Principal); err != nil {
- return err
- }
- if err := pe.putNullableString(a.Host); err != nil {
- return err
- }
- pe.putInt8(int8(a.Operation))
- pe.putInt8(int8(a.PermissionType))
-
- return nil
-}
-
-func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
- resourceType, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.ResourceType = AclResourceType(resourceType)
-
- if a.ResourceName, err = pd.getNullableString(); err != nil {
- return err
- }
-
- if a.Version == 1 {
- pattern, err := pd.getInt8()
-
- if err != nil {
- return err
- }
-
- a.ResourcePatternTypeFilter = AclResourcePatternType(pattern)
- }
-
- if a.Principal, err = pd.getNullableString(); err != nil {
- return err
- }
-
- if a.Host, err = pd.getNullableString(); err != nil {
- return err
- }
-
- operation, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Operation = AclOperation(operation)
-
- permissionType, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.PermissionType = AclPermissionType(permissionType)
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/acl_types.go b/vendor/gopkg.in/Shopify/sarama.v1/acl_types.go
deleted file mode 100644
index c10ad7b..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/acl_types.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package sarama
-
-type (
- AclOperation int
-
- AclPermissionType int
-
- AclResourceType int
-
- AclResourcePatternType int
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
-const (
- AclOperationUnknown AclOperation = iota
- AclOperationAny
- AclOperationAll
- AclOperationRead
- AclOperationWrite
- AclOperationCreate
- AclOperationDelete
- AclOperationAlter
- AclOperationDescribe
- AclOperationClusterAction
- AclOperationDescribeConfigs
- AclOperationAlterConfigs
- AclOperationIdempotentWrite
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
-const (
- AclPermissionUnknown AclPermissionType = iota
- AclPermissionAny
- AclPermissionDeny
- AclPermissionAllow
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
-const (
- AclResourceUnknown AclResourceType = iota
- AclResourceAny
- AclResourceTopic
- AclResourceGroup
- AclResourceCluster
- AclResourceTransactionalID
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
-const (
- AclPatternUnknown AclResourcePatternType = iota
- AclPatternAny
- AclPatternMatch
- AclPatternLiteral
- AclPatternPrefixed
-)
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/add_offsets_to_txn_request.go b/vendor/gopkg.in/Shopify/sarama.v1/add_offsets_to_txn_request.go
deleted file mode 100644
index fc227ab..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/add_offsets_to_txn_request.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package sarama
-
-//AddOffsetsToTxnRequest adds offsets to a transaction request
-type AddOffsetsToTxnRequest struct {
- TransactionalID string
- ProducerID int64
- ProducerEpoch int16
- GroupID string
-}
-
-func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
- if err := pe.putString(a.TransactionalID); err != nil {
- return err
- }
-
- pe.putInt64(a.ProducerID)
-
- pe.putInt16(a.ProducerEpoch)
-
- if err := pe.putString(a.GroupID); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
- if a.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if a.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if a.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
- if a.GroupID, err = pd.getString(); err != nil {
- return err
- }
- return nil
-}
-
-func (a *AddOffsetsToTxnRequest) key() int16 {
- return 25
-}
-
-func (a *AddOffsetsToTxnRequest) version() int16 {
- return 0
-}
-
-func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/add_offsets_to_txn_response.go b/vendor/gopkg.in/Shopify/sarama.v1/add_offsets_to_txn_response.go
deleted file mode 100644
index c88c1f8..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/add_offsets_to_txn_response.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-//AddOffsetsToTxnResponse is a response type for adding offsets to txns
-type AddOffsetsToTxnResponse struct {
- ThrottleTime time.Duration
- Err KError
-}
-
-func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(a.Err))
- return nil
-}
-
-func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- a.Err = KError(kerr)
-
- return nil
-}
-
-func (a *AddOffsetsToTxnResponse) key() int16 {
- return 25
-}
-
-func (a *AddOffsetsToTxnResponse) version() int16 {
- return 0
-}
-
-func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/add_partitions_to_txn_request.go b/vendor/gopkg.in/Shopify/sarama.v1/add_partitions_to_txn_request.go
deleted file mode 100644
index 8d4b42e..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/add_partitions_to_txn_request.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package sarama
-
-//AddPartitionsToTxnRequest is a add paartition request
-type AddPartitionsToTxnRequest struct {
- TransactionalID string
- ProducerID int64
- ProducerEpoch int16
- TopicPartitions map[string][]int32
-}
-
-func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
- if err := pe.putString(a.TransactionalID); err != nil {
- return err
- }
- pe.putInt64(a.ProducerID)
- pe.putInt16(a.ProducerEpoch)
-
- if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
- return err
- }
- for topic, partitions := range a.TopicPartitions {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putInt32Array(partitions); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
- if a.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if a.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if a.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.TopicPartitions = make(map[string][]int32)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
-
- partitions, err := pd.getInt32Array()
- if err != nil {
- return err
- }
-
- a.TopicPartitions[topic] = partitions
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnRequest) key() int16 {
- return 24
-}
-
-func (a *AddPartitionsToTxnRequest) version() int16 {
- return 0
-}
-
-func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/add_partitions_to_txn_response.go b/vendor/gopkg.in/Shopify/sarama.v1/add_partitions_to_txn_response.go
deleted file mode 100644
index eb4f23e..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/add_partitions_to_txn_response.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-//AddPartitionsToTxnResponse is a partition errors to transaction type
-type AddPartitionsToTxnResponse struct {
- ThrottleTime time.Duration
- Errors map[string][]*PartitionError
-}
-
-func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
- if err := pe.putArrayLength(len(a.Errors)); err != nil {
- return err
- }
-
- for topic, e := range a.Errors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(e)); err != nil {
- return err
- }
- for _, partitionError := range e {
- if err := partitionError.encode(pe); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Errors = make(map[string][]*PartitionError)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
-
- m, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Errors[topic] = make([]*PartitionError, m)
-
- for j := 0; j < m; j++ {
- a.Errors[topic][j] = new(PartitionError)
- if err := a.Errors[topic][j].decode(pd, version); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnResponse) key() int16 {
- return 24
-}
-
-func (a *AddPartitionsToTxnResponse) version() int16 {
- return 0
-}
-
-func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-//PartitionError is a partition error type
-type PartitionError struct {
- Partition int32
- Err KError
-}
-
-func (p *PartitionError) encode(pe packetEncoder) error {
- pe.putInt32(p.Partition)
- pe.putInt16(int16(p.Err))
- return nil
-}
-
-func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
- if p.Partition, err = pd.getInt32(); err != nil {
- return err
- }
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- p.Err = KError(kerr)
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/admin.go b/vendor/gopkg.in/Shopify/sarama.v1/admin.go
deleted file mode 100644
index a4d1bc5..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/admin.go
+++ /dev/null
@@ -1,648 +0,0 @@
-package sarama
-
-import (
- "errors"
- "math/rand"
- "sync"
-)
-
-// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
-// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
-// Methods with stricter requirements will specify the minimum broker version required.
-// You MUST call Close() on a client to avoid leaks
-type ClusterAdmin interface {
- // Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
- // It may take several seconds after CreateTopic returns success for all the brokers
- // to become aware that the topic has been created. During this time, listTopics
- // may not return information about the new topic.The validateOnly option is supported from version 0.10.2.0.
- CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
-
- // List the topics available in the cluster with the default options.
- ListTopics() (map[string]TopicDetail, error)
-
- // Describe some topics in the cluster.
- DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
-
- // Delete a topic. It may take several seconds after the DeleteTopic to returns success
- // and for all the brokers to become aware that the topics are gone.
- // During this time, listTopics may continue to return information about the deleted topic.
- // If delete.topic.enable is false on the brokers, deleteTopic will mark
- // the topic for deletion, but not actually delete them.
- // This operation is supported by brokers with version 0.10.1.0 or higher.
- DeleteTopic(topic string) error
-
- // Increase the number of partitions of the topics according to the corresponding values.
- // If partitions are increased for a topic that has a key, the partition logic or ordering of
- // the messages will be affected. It may take several seconds after this method returns
- // success for all the brokers to become aware that the partitions have been created.
- // During this time, ClusterAdmin#describeTopics may not return information about the
- // new partitions. This operation is supported by brokers with version 1.0.0 or higher.
- CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
-
- // Delete records whose offset is smaller than the given offset of the corresponding partition.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- DeleteRecords(topic string, partitionOffsets map[int32]int64) error
-
- // Get the configuration for the specified resources.
- // The returned configuration includes default values and the Default is true
- // can be used to distinguish them from user supplied values.
- // Config entries where ReadOnly is true cannot be updated.
- // The value of config entries where Sensitive is true is always nil so
- // sensitive information is not disclosed.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
-
- // Update the configuration for the specified resources with the default options.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- // The resources with their configs (topic is the only resource type with configs
- // that can be updated currently Updates are not transactional so they may succeed
- // for some resources while fail for others. The configs for a particular resource are updated automatically.
- AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
-
- // Creates access control lists (ACLs) which are bound to specific resources.
- // This operation is not transactional so it may succeed for some ACLs while fail for others.
- // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
- // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
- CreateACL(resource Resource, acl Acl) error
-
- // Lists access control lists (ACLs) according to the supplied filter.
- // it may take some time for changes made by createAcls or deleteAcls to be reflected in the output of ListAcls
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- ListAcls(filter AclFilter) ([]ResourceAcls, error)
-
- // Deletes access control lists (ACLs) according to the supplied filters.
- // This operation is not transactional so it may succeed for some ACLs while fail for others.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
-
- // List the consumer groups available in the cluster.
- ListConsumerGroups() (map[string]string, error)
-
- // Describe the given consumer groups.
- DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
-
- // List the consumer group offsets available in the cluster.
- ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
-
- // Delete a consumer group.
- DeleteConsumerGroup(group string) error
-
- // Get information about the nodes in the cluster
- DescribeCluster() (brokers []*Broker, controllerID int32, err error)
-
- // Close shuts down the admin and closes underlying client.
- Close() error
-}
-
-type clusterAdmin struct {
- client Client
- conf *Config
-}
-
-// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
-func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
- client, err := NewClient(addrs, conf)
- if err != nil {
- return nil, err
- }
-
- //make sure we can retrieve the controller
- _, err = client.Controller()
- if err != nil {
- return nil, err
- }
-
- ca := &clusterAdmin{
- client: client,
- conf: client.Config(),
- }
- return ca, nil
-}
-
-func (ca *clusterAdmin) Close() error {
- return ca.client.Close()
-}
-
-func (ca *clusterAdmin) Controller() (*Broker, error) {
- return ca.client.Controller()
-}
-
-func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
-
- if topic == "" {
- return ErrInvalidTopic
- }
-
- if detail == nil {
- return errors.New("you must specify topic details")
- }
-
- topicDetails := make(map[string]*TopicDetail)
- topicDetails[topic] = detail
-
- request := &CreateTopicsRequest{
- TopicDetails: topicDetails,
- ValidateOnly: validateOnly,
- Timeout: ca.conf.Admin.Timeout,
- }
-
- if ca.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 1
- }
- if ca.conf.Version.IsAtLeast(V1_0_0_0) {
- request.Version = 2
- }
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.CreateTopics(request)
- if err != nil {
- return err
- }
-
- topicErr, ok := rsp.TopicErrors[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if topicErr.Err != ErrNoError {
- return topicErr
- }
-
- return nil
-}
-
-func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
- controller, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- request := &MetadataRequest{
- Topics: topics,
- AllowAutoTopicCreation: false,
- }
-
- if ca.conf.Version.IsAtLeast(V1_0_0_0) {
- request.Version = 5
- } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 4
- }
-
- response, err := controller.GetMetadata(request)
- if err != nil {
- return nil, err
- }
- return response.Topics, nil
-}
-
-func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
- controller, err := ca.Controller()
- if err != nil {
- return nil, int32(0), err
- }
-
- request := &MetadataRequest{
- Topics: []string{},
- }
-
- response, err := controller.GetMetadata(request)
- if err != nil {
- return nil, int32(0), err
- }
-
- return response.Brokers, response.ControllerID, nil
-}
-
-func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
- brokers := ca.client.Brokers()
- if len(brokers) > 0 {
- index := rand.Intn(len(brokers))
- return brokers[index], nil
- }
- return nil, errors.New("no available broker")
-}
-
-func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
- // In order to build TopicDetails we need to first get the list of all
- // topics using a MetadataRequest and then get their configs using a
- // DescribeConfigsRequest request. To avoid sending many requests to the
- // broker, we use a single DescribeConfigsRequest.
-
- // Send the all-topic MetadataRequest
- b, err := ca.findAnyBroker()
- if err != nil {
- return nil, err
- }
- _ = b.Open(ca.client.Config())
-
- metadataReq := &MetadataRequest{}
- metadataResp, err := b.GetMetadata(metadataReq)
- if err != nil {
- return nil, err
- }
-
- topicsDetailsMap := make(map[string]TopicDetail)
-
- var describeConfigsResources []*ConfigResource
-
- for _, topic := range metadataResp.Topics {
- topicDetails := TopicDetail{
- NumPartitions: int32(len(topic.Partitions)),
- }
- if len(topic.Partitions) > 0 {
- topicDetails.ReplicaAssignment = map[int32][]int32{}
- for _, partition := range topic.Partitions {
- topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
- }
- topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
- }
- topicsDetailsMap[topic.Name] = topicDetails
-
- // we populate the resources we want to describe from the MetadataResponse
- topicResource := ConfigResource{
- Type: TopicResource,
- Name: topic.Name,
- }
- describeConfigsResources = append(describeConfigsResources, &topicResource)
- }
-
- // Send the DescribeConfigsRequest
- describeConfigsReq := &DescribeConfigsRequest{
- Resources: describeConfigsResources,
- }
- describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
- if err != nil {
- return nil, err
- }
-
- for _, resource := range describeConfigsResp.Resources {
- topicDetails := topicsDetailsMap[resource.Name]
- topicDetails.ConfigEntries = make(map[string]*string)
-
- for _, entry := range resource.Configs {
- // only include non-default non-sensitive config
- // (don't actually think topic config will ever be sensitive)
- if entry.Default || entry.Sensitive {
- continue
- }
- topicDetails.ConfigEntries[entry.Name] = &entry.Value
- }
-
- topicsDetailsMap[resource.Name] = topicDetails
- }
-
- return topicsDetailsMap, nil
-}
-
-func (ca *clusterAdmin) DeleteTopic(topic string) error {
-
- if topic == "" {
- return ErrInvalidTopic
- }
-
- request := &DeleteTopicsRequest{
- Topics: []string{topic},
- Timeout: ca.conf.Admin.Timeout,
- }
-
- if ca.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 1
- }
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.DeleteTopics(request)
- if err != nil {
- return err
- }
-
- topicErr, ok := rsp.TopicErrorCodes[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if topicErr != ErrNoError {
- return topicErr
- }
- return nil
-}
-
-func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
- if topic == "" {
- return ErrInvalidTopic
- }
-
- topicPartitions := make(map[string]*TopicPartition)
- topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
-
- request := &CreatePartitionsRequest{
- TopicPartitions: topicPartitions,
- Timeout: ca.conf.Admin.Timeout,
- }
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.CreatePartitions(request)
- if err != nil {
- return err
- }
-
- topicErr, ok := rsp.TopicPartitionErrors[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if topicErr.Err != ErrNoError {
- return topicErr
- }
-
- return nil
-}
-
-func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
-
- if topic == "" {
- return ErrInvalidTopic
- }
-
- topics := make(map[string]*DeleteRecordsRequestTopic)
- topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets}
- request := &DeleteRecordsRequest{
- Topics: topics,
- Timeout: ca.conf.Admin.Timeout,
- }
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.DeleteRecords(request)
- if err != nil {
- return err
- }
-
- _, ok := rsp.Topics[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- //todo since we are dealing with couple of partitions it would be good if we return slice of errors
- //for each partition instead of one error
- return nil
-}
-
-func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
-
- var entries []ConfigEntry
- var resources []*ConfigResource
- resources = append(resources, &resource)
-
- request := &DescribeConfigsRequest{
- Resources: resources,
- }
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.DescribeConfigs(request)
- if err != nil {
- return nil, err
- }
-
- for _, rspResource := range rsp.Resources {
- if rspResource.Name == resource.Name {
- if rspResource.ErrorMsg != "" {
- return nil, errors.New(rspResource.ErrorMsg)
- }
- for _, cfgEntry := range rspResource.Configs {
- entries = append(entries, *cfgEntry)
- }
- }
- }
- return entries, nil
-}
-
-func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
-
- var resources []*AlterConfigsResource
- resources = append(resources, &AlterConfigsResource{
- Type: resourceType,
- Name: name,
- ConfigEntries: entries,
- })
-
- request := &AlterConfigsRequest{
- Resources: resources,
- ValidateOnly: validateOnly,
- }
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.AlterConfigs(request)
- if err != nil {
- return err
- }
-
- for _, rspResource := range rsp.Resources {
- if rspResource.Name == name {
- if rspResource.ErrorMsg != "" {
- return errors.New(rspResource.ErrorMsg)
- }
- }
- }
- return nil
-}
-
-func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
- var acls []*AclCreation
- acls = append(acls, &AclCreation{resource, acl})
- request := &CreateAclsRequest{AclCreations: acls}
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- _, err = b.CreateAcls(request)
- return err
-}
-
-func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
-
- request := &DescribeAclsRequest{AclFilter: filter}
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.DescribeAcls(request)
- if err != nil {
- return nil, err
- }
-
- var lAcls []ResourceAcls
- for _, rAcl := range rsp.ResourceAcls {
- lAcls = append(lAcls, *rAcl)
- }
- return lAcls, nil
-}
-
-func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
- var filters []*AclFilter
- filters = append(filters, &filter)
- request := &DeleteAclsRequest{Filters: filters}
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.DeleteAcls(request)
- if err != nil {
- return nil, err
- }
-
- var mAcls []MatchingAcl
- for _, fr := range rsp.FilterResponses {
- for _, mACL := range fr.MatchingAcls {
- mAcls = append(mAcls, *mACL)
- }
-
- }
- return mAcls, nil
-}
-
-func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
- groupsPerBroker := make(map[*Broker][]string)
-
- for _, group := range groups {
- controller, err := ca.client.Coordinator(group)
- if err != nil {
- return nil, err
- }
- groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
-
- }
-
- for broker, brokerGroups := range groupsPerBroker {
- response, err := broker.DescribeGroups(&DescribeGroupsRequest{
- Groups: brokerGroups,
- })
- if err != nil {
- return nil, err
- }
-
- result = append(result, response.Groups...)
- }
- return result, nil
-}
-
-func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
- allGroups = make(map[string]string)
-
- // Query brokers in parallel, since we have to query *all* brokers
- brokers := ca.client.Brokers()
- groupMaps := make(chan map[string]string, len(brokers))
- errors := make(chan error, len(brokers))
- wg := sync.WaitGroup{}
-
- for _, b := range brokers {
- wg.Add(1)
- go func(b *Broker, conf *Config) {
- defer wg.Done()
- _ = b.Open(conf) // Ensure that broker is opened
-
- response, err := b.ListGroups(&ListGroupsRequest{})
- if err != nil {
- errors <- err
- return
- }
-
- groups := make(map[string]string)
- for group, typ := range response.Groups {
- groups[group] = typ
- }
-
- groupMaps <- groups
-
- }(b, ca.conf)
- }
-
- wg.Wait()
- close(groupMaps)
- close(errors)
-
- for groupMap := range groupMaps {
- for group, protocolType := range groupMap {
- allGroups[group] = protocolType
- }
- }
-
- // Intentionally return only the first error for simplicity
- err = <-errors
- return
-}
-
-func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
- coordinator, err := ca.client.Coordinator(group)
- if err != nil {
- return nil, err
- }
-
- request := &OffsetFetchRequest{
- ConsumerGroup: group,
- partitions: topicPartitions,
- }
-
- if ca.conf.Version.IsAtLeast(V0_10_2_0) {
- request.Version = 2
- } else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
- request.Version = 1
- }
-
- return coordinator.FetchOffset(request)
-}
-
-func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
- coordinator, err := ca.client.Coordinator(group)
- if err != nil {
- return err
- }
-
- request := &DeleteGroupsRequest{
- Groups: []string{group},
- }
-
- resp, err := coordinator.DeleteGroups(request)
- if err != nil {
- return err
- }
-
- groupErr, ok := resp.GroupErrorCodes[group]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if groupErr != ErrNoError {
- return groupErr
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/alter_configs_request.go b/vendor/gopkg.in/Shopify/sarama.v1/alter_configs_request.go
deleted file mode 100644
index 26c275b..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/alter_configs_request.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package sarama
-
-//AlterConfigsRequest is an alter config request type
-type AlterConfigsRequest struct {
- Resources []*AlterConfigsResource
- ValidateOnly bool
-}
-
-//AlterConfigsResource is an alter config resource type
-type AlterConfigsResource struct {
- Type ConfigResourceType
- Name string
- ConfigEntries map[string]*string
-}
-
-func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(a.Resources)); err != nil {
- return err
- }
-
- for _, r := range a.Resources {
- if err := r.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putBool(a.ValidateOnly)
- return nil
-}
-
-func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
- resourceCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Resources = make([]*AlterConfigsResource, resourceCount)
- for i := range a.Resources {
- r := &AlterConfigsResource{}
- err = r.decode(pd, version)
- if err != nil {
- return err
- }
- a.Resources[i] = r
- }
-
- validateOnly, err := pd.getBool()
- if err != nil {
- return err
- }
-
- a.ValidateOnly = validateOnly
-
- return nil
-}
-
-func (a *AlterConfigsResource) encode(pe packetEncoder) error {
- pe.putInt8(int8(a.Type))
-
- if err := pe.putString(a.Name); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
- return err
- }
- for configKey, configValue := range a.ConfigEntries {
- if err := pe.putString(configKey); err != nil {
- return err
- }
- if err := pe.putNullableString(configValue); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- a.Name = name
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- a.ConfigEntries = make(map[string]*string, n)
- for i := 0; i < n; i++ {
- configKey, err := pd.getString()
- if err != nil {
- return err
- }
- if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
- return err
- }
- }
- }
- return err
-}
-
-func (a *AlterConfigsRequest) key() int16 {
- return 33
-}
-
-func (a *AlterConfigsRequest) version() int16 {
- return 0
-}
-
-func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/alter_configs_response.go b/vendor/gopkg.in/Shopify/sarama.v1/alter_configs_response.go
deleted file mode 100644
index 3893663..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/alter_configs_response.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package sarama
-
-import "time"
-
-//AlterConfigsResponse is a reponse type for alter config
-type AlterConfigsResponse struct {
- ThrottleTime time.Duration
- Resources []*AlterConfigsResourceResponse
-}
-
-//AlterConfigsResourceResponse is a reponse type for alter config resource
-type AlterConfigsResourceResponse struct {
- ErrorCode int16
- ErrorMsg string
- Type ConfigResourceType
- Name string
-}
-
-func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(a.Resources)); err != nil {
- return err
- }
-
- for i := range a.Resources {
- pe.putInt16(a.Resources[i].ErrorCode)
- err := pe.putString(a.Resources[i].ErrorMsg)
- if err != nil {
- return nil
- }
- pe.putInt8(int8(a.Resources[i].Type))
- err = pe.putString(a.Resources[i].Name)
- if err != nil {
- return nil
- }
- }
-
- return nil
-}
-
-func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- responseCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
- for i := range a.Resources {
- a.Resources[i] = new(AlterConfigsResourceResponse)
-
- errCode, err := pd.getInt16()
- if err != nil {
- return err
- }
- a.Resources[i].ErrorCode = errCode
-
- e, err := pd.getString()
- if err != nil {
- return err
- }
- a.Resources[i].ErrorMsg = e
-
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Resources[i].Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- a.Resources[i].Name = name
- }
-
- return nil
-}
-
-func (a *AlterConfigsResponse) key() int16 {
- return 32
-}
-
-func (a *AlterConfigsResponse) version() int16 {
- return 0
-}
-
-func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go b/vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go
deleted file mode 100644
index b33167c..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/api_versions_request.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package sarama
-
-//ApiVersionsRequest ...
-type ApiVersionsRequest struct {
-}
-
-func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
- return nil
-}
-
-func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
- return nil
-}
-
-func (a *ApiVersionsRequest) key() int16 {
- return 18
-}
-
-func (a *ApiVersionsRequest) version() int16 {
- return 0
-}
-
-func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go b/vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go
deleted file mode 100644
index bb1f0b3..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/api_versions_response.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-//ApiVersionsResponseBlock is an api version reponse block type
-type ApiVersionsResponseBlock struct {
- ApiKey int16
- MinVersion int16
- MaxVersion int16
-}
-
-func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
- pe.putInt16(b.ApiKey)
- pe.putInt16(b.MinVersion)
- pe.putInt16(b.MaxVersion)
- return nil
-}
-
-func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
- var err error
-
- if b.ApiKey, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.MinVersion, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.MaxVersion, err = pd.getInt16(); err != nil {
- return err
- }
-
- return nil
-}
-
-//ApiVersionsResponse is an api version response type
-type ApiVersionsResponse struct {
- Err KError
- ApiVersions []*ApiVersionsResponseBlock
-}
-
-func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
- return err
- }
- for _, apiVersion := range r.ApiVersions {
- if err := apiVersion.encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
- for i := 0; i < numBlocks; i++ {
- block := new(ApiVersionsResponseBlock)
- if err := block.decode(pd); err != nil {
- return err
- }
- r.ApiVersions[i] = block
- }
-
- return nil
-}
-
-func (r *ApiVersionsResponse) key() int16 {
- return 18
-}
-
-func (r *ApiVersionsResponse) version() int16 {
- return 0
-}
-
-func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/async_producer.go b/vendor/gopkg.in/Shopify/sarama.v1/async_producer.go
deleted file mode 100644
index 11e0849..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/async_producer.go
+++ /dev/null
@@ -1,1104 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "fmt"
- "sync"
- "time"
-
- "github.com/eapache/go-resiliency/breaker"
- "github.com/eapache/queue"
-)
-
-// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
-// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
-// and parses responses for errors. You must read from the Errors() channel or the
-// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
-// leaks: it will not be garbage-collected automatically when it passes out of
-// scope.
-type AsyncProducer interface {
-
- // AsyncClose triggers a shutdown of the producer. The shutdown has completed
- // when both the Errors and Successes channels have been closed. When calling
- // AsyncClose, you *must* continue to read from those channels in order to
- // drain the results of any messages in flight.
- AsyncClose()
-
- // Close shuts down the producer and waits for any buffered messages to be
- // flushed. You must call this function before a producer object passes out of
- // scope, as it may otherwise leak memory. You must call this before calling
- // Close on the underlying client.
- Close() error
-
- // Input is the input channel for the user to write messages to that they
- // wish to send.
- Input() chan<- *ProducerMessage
-
- // Successes is the success output channel back to the user when Return.Successes is
- // enabled. If Return.Successes is true, you MUST read from this channel or the
- // Producer will deadlock. It is suggested that you send and read messages
- // together in a single select statement.
- Successes() <-chan *ProducerMessage
-
- // Errors is the error output channel back to the user. You MUST read from this
- // channel or the Producer will deadlock when the channel is full. Alternatively,
- // you can set Producer.Return.Errors in your config to false, which prevents
- // errors to be returned.
- Errors() <-chan *ProducerError
-}
-
-// transactionManager keeps the state necessary to ensure idempotent production
-type transactionManager struct {
- producerID int64
- producerEpoch int16
- sequenceNumbers map[string]int32
- mutex sync.Mutex
-}
-
-const (
- noProducerID = -1
- noProducerEpoch = -1
-)
-
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
- key := fmt.Sprintf("%s-%d", topic, partition)
- t.mutex.Lock()
- defer t.mutex.Unlock()
- sequence := t.sequenceNumbers[key]
- t.sequenceNumbers[key] = sequence + 1
- return sequence
-}
-
-func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
- txnmgr := &transactionManager{
- producerID: noProducerID,
- producerEpoch: noProducerEpoch,
- }
-
- if conf.Producer.Idempotent {
- initProducerIDResponse, err := client.InitProducerID()
- if err != nil {
- return nil, err
- }
- txnmgr.producerID = initProducerIDResponse.ProducerID
- txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
- txnmgr.sequenceNumbers = make(map[string]int32)
- txnmgr.mutex = sync.Mutex{}
-
- Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
- }
-
- return txnmgr, nil
-}
-
-type asyncProducer struct {
- client Client
- conf *Config
-
- errors chan *ProducerError
- input, successes, retries chan *ProducerMessage
- inFlight sync.WaitGroup
-
- brokers map[*Broker]*brokerProducer
- brokerRefs map[*brokerProducer]int
- brokerLock sync.Mutex
-
- txnmgr *transactionManager
-}
-
-// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
-func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
- client, err := NewClient(addrs, conf)
- if err != nil {
- return nil, err
- }
- return newAsyncProducer(client)
-}
-
-// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
- // For clients passed in by the client, ensure we don't
- // call Close() on it.
- cli := &nopCloserClient{client}
- return newAsyncProducer(cli)
-}
-
-func newAsyncProducer(client Client) (AsyncProducer, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- txnmgr, err := newTransactionManager(client.Config(), client)
- if err != nil {
- return nil, err
- }
-
- p := &asyncProducer{
- client: client,
- conf: client.Config(),
- errors: make(chan *ProducerError),
- input: make(chan *ProducerMessage),
- successes: make(chan *ProducerMessage),
- retries: make(chan *ProducerMessage),
- brokers: make(map[*Broker]*brokerProducer),
- brokerRefs: make(map[*brokerProducer]int),
- txnmgr: txnmgr,
- }
-
- // launch our singleton dispatchers
- go withRecover(p.dispatcher)
- go withRecover(p.retryHandler)
-
- return p, nil
-}
-
-type flagSet int8
-
-const (
- syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
- fin // final message from partitionProducer to brokerProducer and back
- shutdown // start the shutdown process
-)
-
-// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
-type ProducerMessage struct {
- Topic string // The Kafka topic for this message.
- // The partitioning key for this message. Pre-existing Encoders include
- // StringEncoder and ByteEncoder.
- Key Encoder
- // The actual message to store in Kafka. Pre-existing Encoders include
- // StringEncoder and ByteEncoder.
- Value Encoder
-
- // The headers are key-value pairs that are transparently passed
- // by Kafka between producers and consumers.
- Headers []RecordHeader
-
- // This field is used to hold arbitrary data you wish to include so it
- // will be available when receiving on the Successes and Errors channels.
- // Sarama completely ignores this field and is only to be used for
- // pass-through data.
- Metadata interface{}
-
- // Below this point are filled in by the producer as the message is processed
-
- // Offset is the offset of the message stored on the broker. This is only
- // guaranteed to be defined if the message was successfully delivered and
- // RequiredAcks is not NoResponse.
- Offset int64
- // Partition is the partition that the message was sent to. This is only
- // guaranteed to be defined if the message was successfully delivered.
- Partition int32
- // Timestamp can vary in behaviour depending on broker configuration, being
- // in either one of the CreateTime or LogAppendTime modes (default CreateTime),
- // and requiring version at least 0.10.0.
- //
- // When configured to CreateTime, the timestamp is specified by the producer
- // either by explicitly setting this field, or when the message is added
- // to a produce set.
- //
- // When configured to LogAppendTime, the timestamp assigned to the message
- // by the broker. This is only guaranteed to be defined if the message was
- // successfully delivered and RequiredAcks is not NoResponse.
- Timestamp time.Time
-
- retries int
- flags flagSet
- expectation chan *ProducerError
- sequenceNumber int32
-}
-
-const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
-
-func (m *ProducerMessage) byteSize(version int) int {
- var size int
- if version >= 2 {
- size = maximumRecordOverhead
- for _, h := range m.Headers {
- size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
- }
- } else {
- size = producerMessageOverhead
- }
- if m.Key != nil {
- size += m.Key.Length()
- }
- if m.Value != nil {
- size += m.Value.Length()
- }
- return size
-}
-
-func (m *ProducerMessage) clear() {
- m.flags = 0
- m.retries = 0
-}
-
-// ProducerError is the type of error generated when the producer fails to deliver a message.
-// It contains the original ProducerMessage as well as the actual error value.
-type ProducerError struct {
- Msg *ProducerMessage
- Err error
-}
-
-func (pe ProducerError) Error() string {
- return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
-}
-
-// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
-// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
-// when closing a producer.
-type ProducerErrors []*ProducerError
-
-func (pe ProducerErrors) Error() string {
- return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
-}
-
-func (p *asyncProducer) Errors() <-chan *ProducerError {
- return p.errors
-}
-
-func (p *asyncProducer) Successes() <-chan *ProducerMessage {
- return p.successes
-}
-
-func (p *asyncProducer) Input() chan<- *ProducerMessage {
- return p.input
-}
-
-func (p *asyncProducer) Close() error {
- p.AsyncClose()
-
- if p.conf.Producer.Return.Successes {
- go withRecover(func() {
- for range p.successes {
- }
- })
- }
-
- var errors ProducerErrors
- if p.conf.Producer.Return.Errors {
- for event := range p.errors {
- errors = append(errors, event)
- }
- } else {
- <-p.errors
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (p *asyncProducer) AsyncClose() {
- go withRecover(p.shutdown)
-}
-
-// singleton
-// dispatches messages by topic
-func (p *asyncProducer) dispatcher() {
- handlers := make(map[string]chan<- *ProducerMessage)
- shuttingDown := false
-
- for msg := range p.input {
- if msg == nil {
- Logger.Println("Something tried to send a nil message, it was ignored.")
- continue
- }
-
- if msg.flags&shutdown != 0 {
- shuttingDown = true
- p.inFlight.Done()
- continue
- } else if msg.retries == 0 {
- if shuttingDown {
- // we can't just call returnError here because that decrements the wait group,
- // which hasn't been incremented yet for this message, and shouldn't be
- pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
- if p.conf.Producer.Return.Errors {
- p.errors <- pErr
- } else {
- Logger.Println(pErr)
- }
- continue
- }
- p.inFlight.Add(1)
- }
-
- version := 1
- if p.conf.Version.IsAtLeast(V0_11_0_0) {
- version = 2
- } else if msg.Headers != nil {
- p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
- continue
- }
- if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
- p.returnError(msg, ErrMessageSizeTooLarge)
- continue
- }
-
- handler := handlers[msg.Topic]
- if handler == nil {
- handler = p.newTopicProducer(msg.Topic)
- handlers[msg.Topic] = handler
- }
-
- handler <- msg
- }
-
- for _, handler := range handlers {
- close(handler)
- }
-}
-
-// one per topic
-// partitions messages, then dispatches them by partition
-type topicProducer struct {
- parent *asyncProducer
- topic string
- input <-chan *ProducerMessage
-
- breaker *breaker.Breaker
- handlers map[int32]chan<- *ProducerMessage
- partitioner Partitioner
-}
-
-func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
- tp := &topicProducer{
- parent: p,
- topic: topic,
- input: input,
- breaker: breaker.New(3, 1, 10*time.Second),
- handlers: make(map[int32]chan<- *ProducerMessage),
- partitioner: p.conf.Producer.Partitioner(topic),
- }
- go withRecover(tp.dispatch)
- return input
-}
-
-func (tp *topicProducer) dispatch() {
- for msg := range tp.input {
- if msg.retries == 0 {
- if err := tp.partitionMessage(msg); err != nil {
- tp.parent.returnError(msg, err)
- continue
- }
- }
- // All messages being retried (sent or not) have already had their retry count updated
- if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
- msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
- }
-
- handler := tp.handlers[msg.Partition]
- if handler == nil {
- handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
- tp.handlers[msg.Partition] = handler
- }
-
- handler <- msg
- }
-
- for _, handler := range tp.handlers {
- close(handler)
- }
-}
-
-func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
- var partitions []int32
-
- err := tp.breaker.Run(func() (err error) {
- var requiresConsistency = false
- if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
- requiresConsistency = ep.MessageRequiresConsistency(msg)
- } else {
- requiresConsistency = tp.partitioner.RequiresConsistency()
- }
-
- if requiresConsistency {
- partitions, err = tp.parent.client.Partitions(msg.Topic)
- } else {
- partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
- }
- return
- })
-
- if err != nil {
- return err
- }
-
- numPartitions := int32(len(partitions))
-
- if numPartitions == 0 {
- return ErrLeaderNotAvailable
- }
-
- choice, err := tp.partitioner.Partition(msg, numPartitions)
-
- if err != nil {
- return err
- } else if choice < 0 || choice >= numPartitions {
- return ErrInvalidPartition
- }
-
- msg.Partition = partitions[choice]
-
- return nil
-}
-
-// one per partition per topic
-// dispatches messages to the appropriate broker
-// also responsible for maintaining message order during retries
-type partitionProducer struct {
- parent *asyncProducer
- topic string
- partition int32
- input <-chan *ProducerMessage
-
- leader *Broker
- breaker *breaker.Breaker
- brokerProducer *brokerProducer
-
- // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
- // all other messages get buffered in retryState[msg.retries].buf to preserve ordering
- // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
- // therefore whether our buffer is complete and safe to flush)
- highWatermark int
- retryState []partitionRetryState
-}
-
-type partitionRetryState struct {
- buf []*ProducerMessage
- expectChaser bool
-}
-
-func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
- pp := &partitionProducer{
- parent: p,
- topic: topic,
- partition: partition,
- input: input,
-
- breaker: breaker.New(3, 1, 10*time.Second),
- retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
- }
- go withRecover(pp.dispatch)
- return input
-}
-
-func (pp *partitionProducer) backoff(retries int) {
- var backoff time.Duration
- if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
- maxRetries := pp.parent.conf.Producer.Retry.Max
- backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
- } else {
- backoff = pp.parent.conf.Producer.Retry.Backoff
- }
- if backoff > 0 {
- time.Sleep(backoff)
- }
-}
-
-func (pp *partitionProducer) dispatch() {
- // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
- // on the first message
- pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
- if pp.leader != nil {
- pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
- pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
- }
-
- defer func() {
- if pp.brokerProducer != nil {
- pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
- }
- }()
-
- for msg := range pp.input {
-
- if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
- select {
- case <-pp.brokerProducer.abandoned:
- // a message on the abandoned channel means that our current broker selection is out of date
- Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
- pp.brokerProducer = nil
- time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
- default:
- // producer connection is still open.
- }
- }
-
- if msg.retries > pp.highWatermark {
- // a new, higher, retry level; handle it and then back off
- pp.newHighWatermark(msg.retries)
- pp.backoff(msg.retries)
- } else if pp.highWatermark > 0 {
- // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
- if msg.retries < pp.highWatermark {
- // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin)
- if msg.flags&fin == fin {
- pp.retryState[msg.retries].expectChaser = false
- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
- } else {
- pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
- }
- continue
- } else if msg.flags&fin == fin {
- // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
- // meaning this retry level is done and we can go down (at least) one level and flush that
- pp.retryState[pp.highWatermark].expectChaser = false
- pp.flushRetryBuffers()
- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
- continue
- }
- }
-
- // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
- // without breaking any of our ordering guarantees
-
- if pp.brokerProducer == nil {
- if err := pp.updateLeader(); err != nil {
- pp.parent.returnError(msg, err)
- pp.backoff(msg.retries)
- continue
- }
- Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- }
-
- pp.brokerProducer.input <- msg
- }
-}
-
-func (pp *partitionProducer) newHighWatermark(hwm int) {
- Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
- pp.highWatermark = hwm
-
- // send off a fin so that we know when everything "in between" has made it
- // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
- pp.retryState[pp.highWatermark].expectChaser = true
- pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
- pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
-
- // a new HWM means that our current broker selection is out of date
- Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
- pp.brokerProducer = nil
-}
-
-func (pp *partitionProducer) flushRetryBuffers() {
- Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
- for {
- pp.highWatermark--
-
- if pp.brokerProducer == nil {
- if err := pp.updateLeader(); err != nil {
- pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
- goto flushDone
- }
- Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- }
-
- for _, msg := range pp.retryState[pp.highWatermark].buf {
- pp.brokerProducer.input <- msg
- }
-
- flushDone:
- pp.retryState[pp.highWatermark].buf = nil
- if pp.retryState[pp.highWatermark].expectChaser {
- Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
- break
- } else if pp.highWatermark == 0 {
- Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
- break
- }
- }
-}
-
-func (pp *partitionProducer) updateLeader() error {
- return pp.breaker.Run(func() (err error) {
- if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
- return err
- }
-
- if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
- return err
- }
-
- pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
- pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-
- return nil
- })
-}
-
-// one per broker; also constructs an associated flusher
-func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
- var (
- input = make(chan *ProducerMessage)
- bridge = make(chan *produceSet)
- responses = make(chan *brokerProducerResponse)
- )
-
- bp := &brokerProducer{
- parent: p,
- broker: broker,
- input: input,
- output: bridge,
- responses: responses,
- buffer: newProduceSet(p),
- currentRetries: make(map[string]map[int32]error),
- }
- go withRecover(bp.run)
-
- // minimal bridge to make the network response `select`able
- go withRecover(func() {
- for set := range bridge {
- request := set.buildRequest()
-
- response, err := broker.Produce(request)
-
- responses <- &brokerProducerResponse{
- set: set,
- err: err,
- res: response,
- }
- }
- close(responses)
- })
-
- if p.conf.Producer.Retry.Max <= 0 {
- bp.abandoned = make(chan struct{})
- }
-
- return bp
-}
-
-type brokerProducerResponse struct {
- set *produceSet
- err error
- res *ProduceResponse
-}
-
-// groups messages together into appropriately-sized batches for sending to the broker
-// handles state related to retries etc
-type brokerProducer struct {
- parent *asyncProducer
- broker *Broker
-
- input chan *ProducerMessage
- output chan<- *produceSet
- responses <-chan *brokerProducerResponse
- abandoned chan struct{}
-
- buffer *produceSet
- timer <-chan time.Time
- timerFired bool
-
- closing error
- currentRetries map[string]map[int32]error
-}
-
-func (bp *brokerProducer) run() {
- var output chan<- *produceSet
- Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
-
- for {
- select {
- case msg := <-bp.input:
- if msg == nil {
- bp.shutdown()
- return
- }
-
- if msg.flags&syn == syn {
- Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
- bp.broker.ID(), msg.Topic, msg.Partition)
- if bp.currentRetries[msg.Topic] == nil {
- bp.currentRetries[msg.Topic] = make(map[int32]error)
- }
- bp.currentRetries[msg.Topic][msg.Partition] = nil
- bp.parent.inFlight.Done()
- continue
- }
-
- if reason := bp.needsRetry(msg); reason != nil {
- bp.parent.retryMessage(msg, reason)
-
- if bp.closing == nil && msg.flags&fin == fin {
- // we were retrying this partition but we can start processing again
- delete(bp.currentRetries[msg.Topic], msg.Partition)
- Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
- bp.broker.ID(), msg.Topic, msg.Partition)
- }
-
- continue
- }
-
- if bp.buffer.wouldOverflow(msg) {
- if err := bp.waitForSpace(msg); err != nil {
- bp.parent.retryMessage(msg, err)
- continue
- }
- }
-
- if err := bp.buffer.add(msg); err != nil {
- bp.parent.returnError(msg, err)
- continue
- }
-
- if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
- bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
- }
- case <-bp.timer:
- bp.timerFired = true
- case output <- bp.buffer:
- bp.rollOver()
- case response := <-bp.responses:
- bp.handleResponse(response)
- }
-
- if bp.timerFired || bp.buffer.readyToFlush() {
- output = bp.output
- } else {
- output = nil
- }
- }
-}
-
-func (bp *brokerProducer) shutdown() {
- for !bp.buffer.empty() {
- select {
- case response := <-bp.responses:
- bp.handleResponse(response)
- case bp.output <- bp.buffer:
- bp.rollOver()
- }
- }
- close(bp.output)
- for response := range bp.responses {
- bp.handleResponse(response)
- }
-
- Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
-}
-
-func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
- if bp.closing != nil {
- return bp.closing
- }
-
- return bp.currentRetries[msg.Topic][msg.Partition]
-}
-
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
- Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-
- for {
- select {
- case response := <-bp.responses:
- bp.handleResponse(response)
- // handling a response can change our state, so re-check some things
- if reason := bp.needsRetry(msg); reason != nil {
- return reason
- } else if !bp.buffer.wouldOverflow(msg) {
- return nil
- }
- case bp.output <- bp.buffer:
- bp.rollOver()
- return nil
- }
- }
-}
-
-func (bp *brokerProducer) rollOver() {
- bp.timer = nil
- bp.timerFired = false
- bp.buffer = newProduceSet(bp.parent)
-}
-
-func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
- if response.err != nil {
- bp.handleError(response.set, response.err)
- } else {
- bp.handleSuccess(response.set, response.res)
- }
-
- if bp.buffer.empty() {
- bp.rollOver() // this can happen if the response invalidated our buffer
- }
-}
-
-func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
- // we iterate through the blocks in the request set, not the response, so that we notice
- // if the response is missing a block completely
- var retryTopics []string
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- if response == nil {
- // this only happens when RequiredAcks is NoResponse, so we have to assume success
- bp.parent.returnSuccesses(pSet.msgs)
- return
- }
-
- block := response.GetBlock(topic, partition)
- if block == nil {
- bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
- return
- }
-
- switch block.Err {
- // Success
- case ErrNoError:
- if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
- for _, msg := range pSet.msgs {
- msg.Timestamp = block.Timestamp
- }
- }
- for i, msg := range pSet.msgs {
- msg.Offset = block.Offset + int64(i)
- }
- bp.parent.returnSuccesses(pSet.msgs)
- // Duplicate
- case ErrDuplicateSequenceNumber:
- bp.parent.returnSuccesses(pSet.msgs)
- // Retriable errors
- case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
- if bp.parent.conf.Producer.Retry.Max <= 0 {
- bp.parent.abandonBrokerConnection(bp.broker)
- bp.parent.returnErrors(pSet.msgs, block.Err)
- } else {
- retryTopics = append(retryTopics, topic)
- }
- // Other non-retriable errors
- default:
- if bp.parent.conf.Producer.Retry.Max <= 0 {
- bp.parent.abandonBrokerConnection(bp.broker)
- }
- bp.parent.returnErrors(pSet.msgs, block.Err)
- }
- })
-
- if len(retryTopics) > 0 {
- if bp.parent.conf.Producer.Idempotent {
- err := bp.parent.client.RefreshMetadata(retryTopics...)
- if err != nil {
- Logger.Printf("Failed refreshing metadata because of %v\n", err)
- }
- }
-
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- block := response.GetBlock(topic, partition)
- if block == nil {
- // handled in the previous "eachPartition" loop
- return
- }
-
- switch block.Err {
- case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
- Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
- bp.broker.ID(), topic, partition, block.Err)
- if bp.currentRetries[topic] == nil {
- bp.currentRetries[topic] = make(map[int32]error)
- }
- bp.currentRetries[topic][partition] = block.Err
- if bp.parent.conf.Producer.Idempotent {
- go bp.parent.retryBatch(topic, partition, pSet, block.Err)
- } else {
- bp.parent.retryMessages(pSet.msgs, block.Err)
- }
- // dropping the following messages has the side effect of incrementing their retry count
- bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
- }
- })
- }
-}
-
-func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
- Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
- produceSet := newProduceSet(p)
- produceSet.msgs[topic] = make(map[int32]*partitionSet)
- produceSet.msgs[topic][partition] = pSet
- produceSet.bufferBytes += pSet.bufferBytes
- produceSet.bufferCount += len(pSet.msgs)
- for _, msg := range pSet.msgs {
- if msg.retries >= p.conf.Producer.Retry.Max {
- p.returnError(msg, kerr)
- return
- }
- msg.retries++
- }
-
- // it's expected that a metadata refresh has been requested prior to calling retryBatch
- leader, err := p.client.Leader(topic, partition)
- if err != nil {
- Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
- for _, msg := range pSet.msgs {
- p.returnError(msg, kerr)
- }
- return
- }
- bp := p.getBrokerProducer(leader)
- bp.output <- produceSet
-}
-
-func (bp *brokerProducer) handleError(sent *produceSet, err error) {
- switch err.(type) {
- case PacketEncodingError:
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- bp.parent.returnErrors(pSet.msgs, err)
- })
- default:
- Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
- bp.parent.abandonBrokerConnection(bp.broker)
- _ = bp.broker.Close()
- bp.closing = err
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- bp.parent.retryMessages(pSet.msgs, err)
- })
- bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- bp.parent.retryMessages(pSet.msgs, err)
- })
- bp.rollOver()
- }
-}
-
-// singleton
-// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
-// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
-func (p *asyncProducer) retryHandler() {
- var msg *ProducerMessage
- buf := queue.New()
-
- for {
- if buf.Length() == 0 {
- msg = <-p.retries
- } else {
- select {
- case msg = <-p.retries:
- case p.input <- buf.Peek().(*ProducerMessage):
- buf.Remove()
- continue
- }
- }
-
- if msg == nil {
- return
- }
-
- buf.Add(msg)
- }
-}
-
-// utility functions
-
-func (p *asyncProducer) shutdown() {
- Logger.Println("Producer shutting down.")
- p.inFlight.Add(1)
- p.input <- &ProducerMessage{flags: shutdown}
-
- p.inFlight.Wait()
-
- err := p.client.Close()
- if err != nil {
- Logger.Println("producer/shutdown failed to close the embedded client:", err)
- }
-
- close(p.input)
- close(p.retries)
- close(p.errors)
- close(p.successes)
-}
-
-func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
- msg.clear()
- pErr := &ProducerError{Msg: msg, Err: err}
- if p.conf.Producer.Return.Errors {
- p.errors <- pErr
- } else {
- Logger.Println(pErr)
- }
- p.inFlight.Done()
-}
-
-func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
- for _, msg := range batch {
- p.returnError(msg, err)
- }
-}
-
-func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
- for _, msg := range batch {
- if p.conf.Producer.Return.Successes {
- msg.clear()
- p.successes <- msg
- }
- p.inFlight.Done()
- }
-}
-
-func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
- if msg.retries >= p.conf.Producer.Retry.Max {
- p.returnError(msg, err)
- } else {
- msg.retries++
- p.retries <- msg
- }
-}
-
-func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
- for _, msg := range batch {
- p.retryMessage(msg, err)
- }
-}
-
-func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- bp := p.brokers[broker]
-
- if bp == nil {
- bp = p.newBrokerProducer(broker)
- p.brokers[broker] = bp
- p.brokerRefs[bp] = 0
- }
-
- p.brokerRefs[bp]++
-
- return bp
-}
-
-func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- p.brokerRefs[bp]--
- if p.brokerRefs[bp] == 0 {
- close(bp.input)
- delete(p.brokerRefs, bp)
-
- if p.brokers[broker] == bp {
- delete(p.brokers, broker)
- }
- }
-}
-
-func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- bc, ok := p.brokers[broker]
- if ok && bc.abandoned != nil {
- close(bc.abandoned)
- }
-
- delete(p.brokers, broker)
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/balance_strategy.go b/vendor/gopkg.in/Shopify/sarama.v1/balance_strategy.go
deleted file mode 100644
index 2fce17f..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/balance_strategy.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package sarama
-
-import (
- "math"
- "sort"
-)
-
-// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
-// It contains an allocation of topic/partitions by memberID in the form of
-// a `memberID -> topic -> partitions` map.
-type BalanceStrategyPlan map[string]map[string][]int32
-
-// Add assigns a topic with a number partitions to a member.
-func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
- if len(partitions) == 0 {
- return
- }
- if _, ok := p[memberID]; !ok {
- p[memberID] = make(map[string][]int32, 1)
- }
- p[memberID][topic] = append(p[memberID][topic], partitions...)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategy is used to balance topics and partitions
-// across members of a consumer group
-type BalanceStrategy interface {
- // Name uniquely identifies the strategy.
- Name() string
-
- // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
- // and returns a distribution plan.
- Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
-// Example with one topic T with six partitions (0..5) and two members (M1, M2):
-// M1: {T: [0, 1, 2]}
-// M2: {T: [3, 4, 5]}
-var BalanceStrategyRange = &balanceStrategy{
- name: "range",
- coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
- step := float64(len(partitions)) / float64(len(memberIDs))
-
- for i, memberID := range memberIDs {
- pos := float64(i)
- min := int(math.Floor(pos*step + 0.5))
- max := int(math.Floor((pos+1)*step + 0.5))
- plan.Add(memberID, topic, partitions[min:max]...)
- }
- },
-}
-
-// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
-// Example with topic T with six partitions (0..5) and two members (M1, M2):
-// M1: {T: [0, 2, 4]}
-// M2: {T: [1, 3, 5]}
-var BalanceStrategyRoundRobin = &balanceStrategy{
- name: "roundrobin",
- coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
- for i, part := range partitions {
- memberID := memberIDs[i%len(memberIDs)]
- plan.Add(memberID, topic, part)
- }
- },
-}
-
-// --------------------------------------------------------------------
-
-type balanceStrategy struct {
- name string
- coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
-}
-
-// Name implements BalanceStrategy.
-func (s *balanceStrategy) Name() string { return s.name }
-
-// Plan implements BalanceStrategy.
-func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
- // Build members by topic map
- mbt := make(map[string][]string)
- for memberID, meta := range members {
- for _, topic := range meta.Topics {
- mbt[topic] = append(mbt[topic], memberID)
- }
- }
-
- // Sort members for each topic
- for topic, memberIDs := range mbt {
- sort.Sort(&balanceStrategySortable{
- topic: topic,
- memberIDs: memberIDs,
- })
- }
-
- // Assemble plan
- plan := make(BalanceStrategyPlan, len(members))
- for topic, memberIDs := range mbt {
- s.coreFn(plan, memberIDs, topic, topics[topic])
- }
- return plan, nil
-}
-
-type balanceStrategySortable struct {
- topic string
- memberIDs []string
-}
-
-func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
-func (p balanceStrategySortable) Swap(i, j int) {
- p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
-}
-func (p balanceStrategySortable) Less(i, j int) bool {
- return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
-}
-
-func balanceStrategyHashValue(vv ...string) uint32 {
- h := uint32(2166136261)
- for _, s := range vv {
- for _, c := range s {
- h ^= uint32(c)
- h *= 16777619
- }
- }
- return h
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/broker.go b/vendor/gopkg.in/Shopify/sarama.v1/broker.go
deleted file mode 100644
index 7b32a03..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/broker.go
+++ /dev/null
@@ -1,1345 +0,0 @@
-package sarama
-
-import (
- "crypto/tls"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- metrics "github.com/rcrowley/go-metrics"
-)
-
-// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
-type Broker struct {
- conf *Config
- rack *string
-
- id int32
- addr string
- correlationID int32
- conn net.Conn
- connErr error
- lock sync.Mutex
- opened int32
- responses chan responsePromise
- done chan bool
-
- registeredMetrics []string
-
- incomingByteRate metrics.Meter
- requestRate metrics.Meter
- requestSize metrics.Histogram
- requestLatency metrics.Histogram
- outgoingByteRate metrics.Meter
- responseRate metrics.Meter
- responseSize metrics.Histogram
- brokerIncomingByteRate metrics.Meter
- brokerRequestRate metrics.Meter
- brokerRequestSize metrics.Histogram
- brokerRequestLatency metrics.Histogram
- brokerOutgoingByteRate metrics.Meter
- brokerResponseRate metrics.Meter
- brokerResponseSize metrics.Histogram
-
- kerberosAuthenticator GSSAPIKerberosAuth
-}
-
-// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
-type SASLMechanism string
-
-const (
- // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
- SASLTypeOAuth = "OAUTHBEARER"
- // SASLTypePlaintext represents the SASL/PLAIN mechanism
- SASLTypePlaintext = "PLAIN"
- // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
- SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
- // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
- SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
- SASLTypeGSSAPI = "GSSAPI"
- // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
- // server negotiate SASL auth using opaque packets.
- SASLHandshakeV0 = int16(0)
- // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
- // server negotiate SASL by wrapping tokens with Kafka protocol headers.
- SASLHandshakeV1 = int16(1)
- // SASLExtKeyAuth is the reserved extension key name sent as part of the
- // SASL/OAUTHBEARER intial client response
- SASLExtKeyAuth = "auth"
-)
-
-// AccessToken contains an access token used to authenticate a
-// SASL/OAUTHBEARER client along with associated metadata.
-type AccessToken struct {
- // Token is the access token payload.
- Token string
- // Extensions is a optional map of arbitrary key-value pairs that can be
- // sent with the SASL/OAUTHBEARER initial client response. These values are
- // ignored by the SASL server if they are unexpected. This feature is only
- // supported by Kafka >= 2.1.0.
- Extensions map[string]string
-}
-
-// AccessTokenProvider is the interface that encapsulates how implementors
-// can generate access tokens for Kafka broker authentication.
-type AccessTokenProvider interface {
- // Token returns an access token. The implementation should ensure token
- // reuse so that multiple calls at connect time do not create multiple
- // tokens. The implementation should also periodically refresh the token in
- // order to guarantee that each call returns an unexpired token. This
- // method should not block indefinitely--a timeout error should be returned
- // after a short period of inactivity so that the broker connection logic
- // can log debugging information and retry.
- Token() (*AccessToken, error)
-}
-
-// SCRAMClient is a an interface to a SCRAM
-// client implementation.
-type SCRAMClient interface {
- // Begin prepares the client for the SCRAM exchange
- // with the server with a user name and a password
- Begin(userName, password, authzID string) error
- // Step steps client through the SCRAM exchange. It is
- // called repeatedly until it errors or `Done` returns true.
- Step(challenge string) (response string, err error)
- // Done should return true when the SCRAM conversation
- // is over.
- Done() bool
-}
-
-type responsePromise struct {
- requestTime time.Time
- correlationID int32
- packets chan []byte
- errors chan error
-}
-
-// NewBroker creates and returns a Broker targeting the given host:port address.
-// This does not attempt to actually connect, you have to call Open() for that.
-func NewBroker(addr string) *Broker {
- return &Broker{id: -1, addr: addr}
-}
-
-// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
-// waiting for the connection to complete. This means that any subsequent operations on the broker will
-// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
-// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
-// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
-func (b *Broker) Open(conf *Config) error {
- if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
- return ErrAlreadyConnected
- }
-
- if conf == nil {
- conf = NewConfig()
- }
-
- err := conf.Validate()
- if err != nil {
- return err
- }
-
- b.lock.Lock()
-
- go withRecover(func() {
- defer b.lock.Unlock()
-
- dialer := net.Dialer{
- Timeout: conf.Net.DialTimeout,
- KeepAlive: conf.Net.KeepAlive,
- LocalAddr: conf.Net.LocalAddr,
- }
-
- if conf.Net.TLS.Enable {
- b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
- } else if conf.Net.Proxy.Enable {
- b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr)
- } else {
- b.conn, b.connErr = dialer.Dial("tcp", b.addr)
- }
- if b.connErr != nil {
- Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
- b.conn = nil
- atomic.StoreInt32(&b.opened, 0)
- return
- }
- b.conn = newBufConn(b.conn)
-
- b.conf = conf
-
- // Create or reuse the global metrics shared between brokers
- b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
- b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
- b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
- b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
- b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
- b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
- b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
- // Do not gather metrics for seeded broker (only used during bootstrap) because they share
- // the same id (-1) and are already exposed through the global metrics above
- if b.id >= 0 {
- b.registerMetrics()
- }
-
- if conf.Net.SASL.Enable {
-
- b.connErr = b.authenticateViaSASL()
-
- if b.connErr != nil {
- err = b.conn.Close()
- if err == nil {
- Logger.Printf("Closed connection to broker %s\n", b.addr)
- } else {
- Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
- }
- b.conn = nil
- atomic.StoreInt32(&b.opened, 0)
- return
- }
- }
-
- b.done = make(chan bool)
- b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
-
- if b.id >= 0 {
- Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
- } else {
- Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
- }
- go withRecover(b.responseReceiver)
- })
-
- return nil
-}
-
-// Connected returns true if the broker is connected and false otherwise. If the broker is not
-// connected but it had tried to connect, the error from that connection attempt is also returned.
-func (b *Broker) Connected() (bool, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- return b.conn != nil, b.connErr
-}
-
-//Close closes the broker resources
-func (b *Broker) Close() error {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.conn == nil {
- return ErrNotConnected
- }
-
- close(b.responses)
- <-b.done
-
- err := b.conn.Close()
-
- b.conn = nil
- b.connErr = nil
- b.done = nil
- b.responses = nil
-
- b.unregisterMetrics()
-
- if err == nil {
- Logger.Printf("Closed connection to broker %s\n", b.addr)
- } else {
- Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
- }
-
- atomic.StoreInt32(&b.opened, 0)
-
- return err
-}
-
-// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
-func (b *Broker) ID() int32 {
- return b.id
-}
-
-// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
-func (b *Broker) Addr() string {
- return b.addr
-}
-
-// Rack returns the broker's rack as retrieved from Kafka's metadata or the
-// empty string if it is not known. The returned value corresponds to the
-// broker's broker.rack configuration setting. Requires protocol version to be
-// at least v0.10.0.0.
-func (b *Broker) Rack() string {
- if b.rack == nil {
- return ""
- }
- return *b.rack
-}
-
-//GetMetadata send a metadata request and returns a metadata response or error
-func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
- response := new(MetadataResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//GetConsumerMetadata send a consumer metadata request and returns a consumer metadata response or error
-func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
- response := new(ConsumerMetadataResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//FindCoordinator sends a find coordinate request and returns a response or error
-func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
- response := new(FindCoordinatorResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//GetAvailableOffsets return an offset response or error
-func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
- response := new(OffsetResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//Produce returns a produce response or error
-func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
- var (
- response *ProduceResponse
- err error
- )
-
- if request.RequiredAcks == NoResponse {
- err = b.sendAndReceive(request, nil)
- } else {
- response = new(ProduceResponse)
- err = b.sendAndReceive(request, response)
- }
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//Fetch returns a FetchResponse or error
-func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
- response := new(FetchResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//CommitOffset return an Offset commit reponse or error
-func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
- response := new(OffsetCommitResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//FetchOffset returns an offset fetch response or error
-func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
- response := new(OffsetFetchResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//JoinGroup returns a join group response or error
-func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
- response := new(JoinGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//SyncGroup returns a sync group response or error
-func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
- response := new(SyncGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//LeaveGroup return a leave group response or error
-func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
- response := new(LeaveGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//Heartbeat returns a heartbeat response or error
-func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
- response := new(HeartbeatResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//ListGroups return a list group response or error
-func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
- response := new(ListGroupsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DescribeGroups return describe group response or error
-func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
- response := new(DescribeGroupsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//ApiVersions return api version response or error
-func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
- response := new(ApiVersionsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//CreateTopics send a create topic request and returns create topic response
-func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
- response := new(CreateTopicsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DeleteTopics sends a delete topic request and returns delete topic response
-func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
- response := new(DeleteTopicsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//CreatePartitions sends a create partition request and returns create
-//partitions response or error
-func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
- response := new(CreatePartitionsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DeleteRecords send a request to delete records and return delete record
-//response or error
-func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
- response := new(DeleteRecordsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DescribeAcls sends a describe acl request and returns a response or error
-func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
- response := new(DescribeAclsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//CreateAcls sends a create acl request and returns a response or error
-func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
- response := new(CreateAclsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DeleteAcls sends a delete acl request and returns a response or error
-func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
- response := new(DeleteAclsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//InitProducerID sends an init producer request and returns a response or error
-func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
- response := new(InitProducerIDResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//AddPartitionsToTxn send a request to add partition to txn and returns
-//a response or error
-func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
- response := new(AddPartitionsToTxnResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//AddOffsetsToTxn sends a request to add offsets to txn and returns a response
-//or error
-func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
- response := new(AddOffsetsToTxnResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//EndTxn sends a request to end txn and returns a response or error
-func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
- response := new(EndTxnResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//TxnOffsetCommit sends a request to commit transaction offsets and returns
-//a response or error
-func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
- response := new(TxnOffsetCommitResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DescribeConfigs sends a request to describe config and returns a response or
-//error
-func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
- response := new(DescribeConfigsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//AlterConfigs sends a request to alter config and return a response or error
-func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
- response := new(AlterConfigsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-//DeleteGroups sends a request to delete groups and returns a response or error
-func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
- response := new(DeleteGroupsResponse)
-
- if err := b.sendAndReceive(request, response); err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.conn == nil {
- if b.connErr != nil {
- return nil, b.connErr
- }
- return nil, ErrNotConnected
- }
-
- if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
- return nil, ErrUnsupportedVersion
- }
-
- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return nil, err
- }
-
- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- return nil, err
- }
-
- requestTime := time.Now()
- bytes, err := b.conn.Write(buf)
- b.updateOutgoingCommunicationMetrics(bytes) //TODO: should it be after error check
- if err != nil {
- return nil, err
- }
- b.correlationID++
-
- if !promiseResponse {
- // Record request latency without the response
- b.updateRequestLatencyMetrics(time.Since(requestTime))
- return nil, nil
- }
-
- promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
- b.responses <- promise
-
- return &promise, nil
-}
-
-func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
- promise, err := b.send(req, res != nil)
- if err != nil {
- return err
- }
-
- if promise == nil {
- return nil
- }
-
- select {
- case buf := <-promise.packets:
- return versionedDecode(buf, res, req.version())
- case err = <-promise.errors:
- return err
- }
-}
-
-func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
- b.id, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- host, err := pd.getString()
- if err != nil {
- return err
- }
-
- port, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- if version >= 1 {
- b.rack, err = pd.getNullableString()
- if err != nil {
- return err
- }
- }
-
- b.addr = net.JoinHostPort(host, fmt.Sprint(port))
- if _, _, err := net.SplitHostPort(b.addr); err != nil {
- return err
- }
-
- return nil
-}
-
-func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
- host, portstr, err := net.SplitHostPort(b.addr)
- if err != nil {
- return err
- }
-
- port, err := strconv.Atoi(portstr)
- if err != nil {
- return err
- }
-
- pe.putInt32(b.id)
-
- err = pe.putString(host)
- if err != nil {
- return err
- }
-
- pe.putInt32(int32(port))
-
- if version >= 1 {
- err = pe.putNullableString(b.rack)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (b *Broker) responseReceiver() {
- var dead error
- header := make([]byte, 8)
-
- for response := range b.responses {
- if dead != nil {
- response.errors <- dead
- continue
- }
-
- err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
- if err != nil {
- dead = err
- response.errors <- err
- continue
- }
-
- bytesReadHeader, err := io.ReadFull(b.conn, header)
- requestLatency := time.Since(response.requestTime)
- if err != nil {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- dead = err
- response.errors <- err
- continue
- }
-
- decodedHeader := responseHeader{}
- err = decode(header, &decodedHeader)
- if err != nil {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- dead = err
- response.errors <- err
- continue
- }
- if decodedHeader.correlationID != response.correlationID {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- // TODO if decoded ID < cur ID, discard until we catch up
- // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
- dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
- response.errors <- dead
- continue
- }
-
- buf := make([]byte, decodedHeader.length-4)
- bytesReadBody, err := io.ReadFull(b.conn, buf)
- b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
- if err != nil {
- dead = err
- response.errors <- err
- continue
- }
-
- response.packets <- buf
- }
- close(b.done)
-}
-
-func (b *Broker) authenticateViaSASL() error {
- switch b.conf.Net.SASL.Mechanism {
- case SASLTypeOAuth:
- return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
- case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
- return b.sendAndReceiveSASLSCRAMv1()
- case SASLTypeGSSAPI:
- return b.sendAndReceiveKerberos()
- default:
- return b.sendAndReceiveSASLPlainAuth()
- }
-}
-
-func (b *Broker) sendAndReceiveKerberos() error {
- b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
- if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
- b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
- }
- return b.kerberosAuthenticator.Authorize(b)
-}
-
-func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
- rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
-
- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return err
- }
-
- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- return err
- }
-
- requestTime := time.Now()
- bytes, err := b.conn.Write(buf)
- b.updateOutgoingCommunicationMetrics(bytes)
- if err != nil {
- Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
- return err
- }
- b.correlationID++
- //wait for the response
- header := make([]byte, 8) // response header
- _, err = io.ReadFull(b.conn, header)
- if err != nil {
- Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
- return err
- }
-
- length := binary.BigEndian.Uint32(header[:4])
- payload := make([]byte, length-4)
- n, err := io.ReadFull(b.conn, payload)
- if err != nil {
- Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
- return err
- }
-
- b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
- res := &SaslHandshakeResponse{}
-
- err = versionedDecode(payload, res, 0)
- if err != nil {
- Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
- return err
- }
-
- if res.Err != ErrNoError {
- Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
- return res.Err
- }
-
- Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
- return nil
-}
-
-// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
-// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
-// wraps the SASL flow in the Kafka protocol, which allows for returning
-// meaningful errors on authentication failure.
-//
-// In SASL Plain, Kafka expects the auth header to be in the following format
-// Message format (from https://tools.ietf.org/html/rfc4616):
-//
-// message = [authzid] UTF8NUL authcid UTF8NUL passwd
-// authcid = 1*SAFE ; MUST accept up to 255 octets
-// authzid = 1*SAFE ; MUST accept up to 255 octets
-// passwd = 1*SAFE ; MUST accept up to 255 octets
-// UTF8NUL = %x00 ; UTF-8 encoded NUL character
-//
-// SAFE = UTF1 / UTF2 / UTF3 / UTF4
-// ;; any UTF-8 encoded Unicode character except NUL
-//
-// With SASL v0 handshake and auth then:
-// When credentials are valid, Kafka returns a 4 byte array of null characters.
-// When credentials are invalid, Kafka closes the connection.
-//
-// With SASL v1 handshake and auth then:
-// When credentials are invalid, Kafka replies with a SaslAuthenticate response
-// containing an error code and message detailing the authentication failure.
-func (b *Broker) sendAndReceiveSASLPlainAuth() error {
- // default to V0 to allow for backward compatability when SASL is enabled
- // but not the handshake
- if b.conf.Net.SASL.Handshake {
-
- handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
- if handshakeErr != nil {
- Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
- return handshakeErr
- }
- }
-
- if b.conf.Net.SASL.Version == SASLHandshakeV1 {
- return b.sendAndReceiveV1SASLPlainAuth()
- }
- return b.sendAndReceiveV0SASLPlainAuth()
-}
-
-// sendAndReceiveV0SASLPlainAuth flows the v0 sasl auth NOT wrapped in the kafka protocol
-func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
-
- length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
- authBytes := make([]byte, length+4) //4 byte length header + auth data
- binary.BigEndian.PutUint32(authBytes, uint32(length))
- copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
-
- err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- requestTime := time.Now()
- bytesWritten, err := b.conn.Write(authBytes)
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- if err != nil {
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- header := make([]byte, 4)
- n, err := io.ReadFull(b.conn, header)
- b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
- // If the credentials are valid, we would get a 4 byte response filled with null characters.
- // Otherwise, the broker closes the connection and we get an EOF
- if err != nil {
- Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
- return nil
-}
-
-// sendAndReceiveV1SASLPlainAuth flows the v1 sasl authentication using the kafka protocol
-func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
- correlationID := b.correlationID
-
- requestTime := time.Now()
-
- bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
-
- b.updateOutgoingCommunicationMetrics(bytesWritten)
-
- if err != nil {
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- b.correlationID++
-
- bytesRead, err := b.receiveSASLServerResponse(correlationID)
- b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
-
- // With v1 sasl we get an error message set in the response we can return
- if err != nil {
- Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
- return err
- }
-
- return nil
-}
-
-// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
-// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
-func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
- if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
- return err
- }
-
- token, err := provider.Token()
- if err != nil {
- return err
- }
-
- requestTime := time.Now()
- correlationID := b.correlationID
-
- bytesWritten, err := b.sendSASLOAuthBearerClientResponse(token, correlationID)
- if err != nil {
- return err
- }
-
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- b.correlationID++
-
- bytesRead, err := b.receiveSASLServerResponse(correlationID)
- if err != nil {
- return err
- }
-
- requestLatency := time.Since(requestTime)
- b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
-
- return nil
-}
-
-func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
- if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
- return err
- }
-
- scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
- if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
- return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
- }
-
- msg, err := scramClient.Step("")
- if err != nil {
- return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
-
- }
-
- for !scramClient.Done() {
- requestTime := time.Now()
- correlationID := b.correlationID
- bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
- if err != nil {
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- b.correlationID++
- challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
- if err != nil {
- Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
- msg, err = scramClient.Step(string(challenge))
- if err != nil {
- Logger.Println("SASL authentication failed", err)
- return err
- }
- }
-
- Logger.Println("SASL authentication succeeded")
- return nil
-}
-
-func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
- rb := &SaslAuthenticateRequest{msg}
- req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return 0, err
- }
-
- if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
- return 0, err
- }
-
- return b.conn.Write(buf)
-}
-
-func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
- buf := make([]byte, responseLengthSize+correlationIDSize)
- _, err := io.ReadFull(b.conn, buf)
- if err != nil {
- return nil, err
- }
-
- header := responseHeader{}
- err = decode(buf, &header)
- if err != nil {
- return nil, err
- }
-
- if header.correlationID != correlationID {
- return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
- }
-
- buf = make([]byte, header.length-correlationIDSize)
- _, err = io.ReadFull(b.conn, buf)
- if err != nil {
- return nil, err
- }
-
- res := &SaslAuthenticateResponse{}
- if err := versionedDecode(buf, res, 0); err != nil {
- return nil, err
- }
- if res.Err != ErrNoError {
- return nil, res.Err
- }
- return res.SaslAuthBytes, nil
-}
-
-// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
-// https://tools.ietf.org/html/rfc7628
-func buildClientInitialResponse(token *AccessToken) ([]byte, error) {
- var ext string
-
- if token.Extensions != nil && len(token.Extensions) > 0 {
- if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
- return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
- }
- ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
- }
-
- resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext))
-
- return resp, nil
-}
-
-// mapToString returns a list of key-value pairs ordered by key.
-// keyValSep separates the key from the value. elemSep separates each pair.
-func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
- buf := make([]string, 0, len(extensions))
-
- for k, v := range extensions {
- buf = append(buf, k+keyValSep+v)
- }
-
- sort.Strings(buf)
-
- return strings.Join(buf, elemSep)
-}
-
-func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
- authBytes := []byte("\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
- rb := &SaslAuthenticateRequest{authBytes}
- req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return 0, err
- }
-
- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
- return 0, err
- }
- return b.conn.Write(buf)
-}
-
-func (b *Broker) sendSASLOAuthBearerClientResponse(token *AccessToken, correlationID int32) (int, error) {
- initialResp, err := buildClientInitialResponse(token)
- if err != nil {
- return 0, err
- }
-
- rb := &SaslAuthenticateRequest{initialResp}
-
- req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return 0, err
- }
-
- if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
- return 0, err
- }
-
- return b.conn.Write(buf)
-}
-
-func (b *Broker) receiveSASLServerResponse(correlationID int32) (int, error) {
-
- buf := make([]byte, responseLengthSize+correlationIDSize)
-
- bytesRead, err := io.ReadFull(b.conn, buf)
- if err != nil {
- return bytesRead, err
- }
-
- header := responseHeader{}
-
- err = decode(buf, &header)
- if err != nil {
- return bytesRead, err
- }
-
- if header.correlationID != correlationID {
- return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
- }
-
- buf = make([]byte, header.length-correlationIDSize)
-
- c, err := io.ReadFull(b.conn, buf)
- bytesRead += c
- if err != nil {
- return bytesRead, err
- }
-
- res := &SaslAuthenticateResponse{}
-
- if err := versionedDecode(buf, res, 0); err != nil {
- return bytesRead, err
- }
-
- if res.Err != ErrNoError {
- return bytesRead, res.Err
- }
-
- if len(res.SaslAuthBytes) > 0 {
- Logger.Printf("Received SASL auth response: %s", res.SaslAuthBytes)
- }
-
- return bytesRead, nil
-}
-
-func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
- b.updateRequestLatencyMetrics(requestLatency)
- b.responseRate.Mark(1)
-
- if b.brokerResponseRate != nil {
- b.brokerResponseRate.Mark(1)
- }
-
- responseSize := int64(bytes)
- b.incomingByteRate.Mark(responseSize)
- if b.brokerIncomingByteRate != nil {
- b.brokerIncomingByteRate.Mark(responseSize)
- }
-
- b.responseSize.Update(responseSize)
- if b.brokerResponseSize != nil {
- b.brokerResponseSize.Update(responseSize)
- }
-}
-
-func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
- requestLatencyInMs := int64(requestLatency / time.Millisecond)
- b.requestLatency.Update(requestLatencyInMs)
-
- if b.brokerRequestLatency != nil {
- b.brokerRequestLatency.Update(requestLatencyInMs)
- }
-
-}
-
-func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
- b.requestRate.Mark(1)
- if b.brokerRequestRate != nil {
- b.brokerRequestRate.Mark(1)
- }
-
- requestSize := int64(bytes)
- b.outgoingByteRate.Mark(requestSize)
- if b.brokerOutgoingByteRate != nil {
- b.brokerOutgoingByteRate.Mark(requestSize)
- }
-
- b.requestSize.Update(requestSize)
- if b.brokerRequestSize != nil {
- b.brokerRequestSize.Update(requestSize)
- }
-
-}
-
-func (b *Broker) registerMetrics() {
- b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
- b.brokerRequestRate = b.registerMeter("request-rate")
- b.brokerRequestSize = b.registerHistogram("request-size")
- b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
- b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
- b.brokerResponseRate = b.registerMeter("response-rate")
- b.brokerResponseSize = b.registerHistogram("response-size")
-}
-
-func (b *Broker) unregisterMetrics() {
- for _, name := range b.registeredMetrics {
- b.conf.MetricRegistry.Unregister(name)
- }
-}
-
-func (b *Broker) registerMeter(name string) metrics.Meter {
- nameForBroker := getMetricNameForBroker(name, b)
- b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
- return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerHistogram(name string) metrics.Histogram {
- nameForBroker := getMetricNameForBroker(name, b)
- b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
- return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/client.go b/vendor/gopkg.in/Shopify/sarama.v1/client.go
deleted file mode 100644
index c4c54b2..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/client.go
+++ /dev/null
@@ -1,992 +0,0 @@
-package sarama
-
-import (
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
-// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected
-// automatically when it passes out of scope. It is safe to share a client amongst many
-// users, however Kafka will process requests from a single client strictly in serial,
-// so it is generally more efficient to use the default one client per producer/consumer.
-type Client interface {
- // Config returns the Config struct of the client. This struct should not be
- // altered after it has been created.
- Config() *Config
-
- // Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
- Controller() (*Broker, error)
-
- // Brokers returns the current set of active brokers as retrieved from cluster metadata.
- Brokers() []*Broker
-
- // Topics returns the set of available topics as retrieved from cluster metadata.
- Topics() ([]string, error)
-
- // Partitions returns the sorted list of all partition IDs for the given topic.
- Partitions(topic string) ([]int32, error)
-
- // WritablePartitions returns the sorted list of all writable partition IDs for
- // the given topic, where "writable" means "having a valid leader accepting
- // writes".
- WritablePartitions(topic string) ([]int32, error)
-
- // Leader returns the broker object that is the leader of the current
- // topic/partition, as determined by querying the cluster metadata.
- Leader(topic string, partitionID int32) (*Broker, error)
-
- // Replicas returns the set of all replica IDs for the given partition.
- Replicas(topic string, partitionID int32) ([]int32, error)
-
- // InSyncReplicas returns the set of all in-sync replica IDs for the given
- // partition. In-sync replicas are replicas which are fully caught up with
- // the partition leader.
- InSyncReplicas(topic string, partitionID int32) ([]int32, error)
-
- // OfflineReplicas returns the set of all offline replica IDs for the given
- // partition. Offline replicas are replicas which are offline
- OfflineReplicas(topic string, partitionID int32) ([]int32, error)
-
- // RefreshMetadata takes a list of topics and queries the cluster to refresh the
- // available metadata for those topics. If no topics are provided, it will refresh
- // metadata for all topics.
- RefreshMetadata(topics ...string) error
-
- // GetOffset queries the cluster to get the most recent available offset at the
- // given time (in milliseconds) on the topic/partition combination.
- // Time should be OffsetOldest for the earliest available offset,
- // OffsetNewest for the offset of the message that will be produced next, or a time.
- GetOffset(topic string, partitionID int32, time int64) (int64, error)
-
- // Coordinator returns the coordinating broker for a consumer group. It will
- // return a locally cached value if it's available. You can call
- // RefreshCoordinator to update the cached value. This function only works on
- // Kafka 0.8.2 and higher.
- Coordinator(consumerGroup string) (*Broker, error)
-
- // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
- // in local cache. This function only works on Kafka 0.8.2 and higher.
- RefreshCoordinator(consumerGroup string) error
-
- // InitProducerID retrieves information required for Idempotent Producer
- InitProducerID() (*InitProducerIDResponse, error)
-
- // Close shuts down all broker connections managed by this client. It is required
- // to call this function before a client object passes out of scope, as it will
- // otherwise leak memory. You must close any Producers or Consumers using a client
- // before you close the client.
- Close() error
-
- // Closed returns true if the client has already had Close called on it
- Closed() bool
-}
-
-const (
- // OffsetNewest stands for the log head offset, i.e. the offset that will be
- // assigned to the next message that will be produced to the partition. You
- // can send this to a client's GetOffset method to get this offset, or when
- // calling ConsumePartition to start consuming new messages.
- OffsetNewest int64 = -1
- // OffsetOldest stands for the oldest offset available on the broker for a
- // partition. You can send this to a client's GetOffset method to get this
- // offset, or when calling ConsumePartition to start consuming from the
- // oldest offset that is still available on the broker.
- OffsetOldest int64 = -2
-)
-
-type client struct {
- conf *Config
- closer, closed chan none // for shutting down background metadata updater
-
- // the broker addresses given to us through the constructor are not guaranteed to be returned in
- // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
- // so we store them separately
- seedBrokers []*Broker
- deadSeeds []*Broker
-
- controllerID int32 // cluster controller broker id
- brokers map[int32]*Broker // maps broker ids to brokers
- metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
- metadataTopics map[string]none // topics that need to collect metadata
- coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
-
- // If the number of partitions is large, we can get some churn calling cachedPartitions,
- // so the result is cached. It is important to update this value whenever metadata is changed
- cachedPartitionsResults map[string][maxPartitionIndex][]int32
-
- lock sync.RWMutex // protects access to the maps that hold cluster state.
-}
-
-// NewClient creates a new Client. It connects to one of the given broker addresses
-// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
-// be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(addrs []string, conf *Config) (Client, error) {
- Logger.Println("Initializing new client")
-
- if conf == nil {
- conf = NewConfig()
- }
-
- if err := conf.Validate(); err != nil {
- return nil, err
- }
-
- if len(addrs) < 1 {
- return nil, ConfigurationError("You must provide at least one broker address")
- }
-
- client := &client{
- conf: conf,
- closer: make(chan none),
- closed: make(chan none),
- brokers: make(map[int32]*Broker),
- metadata: make(map[string]map[int32]*PartitionMetadata),
- metadataTopics: make(map[string]none),
- cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
- coordinators: make(map[string]int32),
- }
-
- random := rand.New(rand.NewSource(time.Now().UnixNano()))
- for _, index := range random.Perm(len(addrs)) {
- client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
- }
-
- if conf.Metadata.Full {
- // do an initial fetch of all cluster metadata by specifying an empty list of topics
- err := client.RefreshMetadata()
- switch err {
- case nil:
- break
- case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
- // indicates that maybe part of the cluster is down, but is not fatal to creating the client
- Logger.Println(err)
- default:
- close(client.closed) // we haven't started the background updater yet, so we have to do this manually
- _ = client.Close()
- return nil, err
- }
- }
- go withRecover(client.backgroundMetadataUpdater)
-
- Logger.Println("Successfully initialized new client")
-
- return client, nil
-}
-
-func (client *client) Config() *Config {
- return client.conf
-}
-
-func (client *client) Brokers() []*Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
- brokers := make([]*Broker, 0, len(client.brokers))
- for _, broker := range client.brokers {
- brokers = append(brokers, broker)
- }
- return brokers
-}
-
-func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
- var err error
- for broker := client.any(); broker != nil; broker = client.any() {
-
- req := &InitProducerIDRequest{}
-
- response, err := broker.InitProducerID(req)
- switch err.(type) {
- case nil:
- return response, nil
- default:
- // some error, remove that broker and try again
- Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
- _ = broker.Close()
- client.deregisterBroker(broker)
- }
- }
- return nil, err
-}
-
-func (client *client) Close() error {
- if client.Closed() {
- // Chances are this is being called from a defer() and the error will go unobserved
- // so we go ahead and log the event in this case.
- Logger.Printf("Close() called on already closed client")
- return ErrClosedClient
- }
-
- // shutdown and wait for the background thread before we take the lock, to avoid races
- close(client.closer)
- <-client.closed
-
- client.lock.Lock()
- defer client.lock.Unlock()
- Logger.Println("Closing Client")
-
- for _, broker := range client.brokers {
- safeAsyncClose(broker)
- }
-
- for _, broker := range client.seedBrokers {
- safeAsyncClose(broker)
- }
-
- client.brokers = nil
- client.metadata = nil
- client.metadataTopics = nil
-
- return nil
-}
-
-func (client *client) Closed() bool {
- return client.brokers == nil
-}
-
-func (client *client) Topics() ([]string, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- ret := make([]string, 0, len(client.metadata))
- for topic := range client.metadata {
- ret = append(ret, topic)
- }
-
- return ret, nil
-}
-
-func (client *client) MetadataTopics() ([]string, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- ret := make([]string, 0, len(client.metadataTopics))
- for topic := range client.metadataTopics {
- ret = append(ret, topic)
- }
-
- return ret, nil
-}
-
-func (client *client) Partitions(topic string) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- partitions := client.cachedPartitions(topic, allPartitions)
-
- if len(partitions) == 0 {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- partitions = client.cachedPartitions(topic, allPartitions)
- }
-
- // no partitions found after refresh metadata
- if len(partitions) == 0 {
- return nil, ErrUnknownTopicOrPartition
- }
-
- return partitions, nil
-}
-
-func (client *client) WritablePartitions(topic string) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- partitions := client.cachedPartitions(topic, writablePartitions)
-
- // len==0 catches when it's nil (no such topic) and the odd case when every single
- // partition is undergoing leader election simultaneously. Callers have to be able to handle
- // this function returning an empty slice (which is a valid return value) but catching it
- // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
- // a metadata refresh as a nicety so callers can just try again and don't have to manually
- // trigger a refresh (otherwise they'd just keep getting a stale cached copy).
- if len(partitions) == 0 {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- partitions = client.cachedPartitions(topic, writablePartitions)
- }
-
- if partitions == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- return partitions, nil
-}
-
-func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return dupInt32Slice(metadata.Replicas), metadata.Err
- }
- return dupInt32Slice(metadata.Replicas), nil
-}
-
-func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return dupInt32Slice(metadata.Isr), metadata.Err
- }
- return dupInt32Slice(metadata.Isr), nil
-}
-
-func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
- }
- return dupInt32Slice(metadata.OfflineReplicas), nil
-}
-
-func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- leader, err := client.cachedLeader(topic, partitionID)
-
- if leader == nil {
- err = client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- leader, err = client.cachedLeader(topic, partitionID)
- }
-
- return leader, err
-}
-
-func (client *client) RefreshMetadata(topics ...string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
- // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
- // error. This handles the case by returning an error instead of sending it
- // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
- for _, topic := range topics {
- if len(topic) == 0 {
- return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
- }
- }
-
- deadline := time.Time{}
- if client.conf.Metadata.Timeout > 0 {
- deadline = time.Now().Add(client.conf.Metadata.Timeout)
- }
- return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
-}
-
-func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
- if client.Closed() {
- return -1, ErrClosedClient
- }
-
- offset, err := client.getOffset(topic, partitionID, time)
-
- if err != nil {
- if err := client.RefreshMetadata(topic); err != nil {
- return -1, err
- }
- return client.getOffset(topic, partitionID, time)
- }
-
- return offset, err
-}
-
-func (client *client) Controller() (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- if !client.conf.Version.IsAtLeast(V0_10_0_0) {
- return nil, ErrUnsupportedVersion
- }
-
- controller := client.cachedController()
- if controller == nil {
- if err := client.refreshMetadata(); err != nil {
- return nil, err
- }
- controller = client.cachedController()
- }
-
- if controller == nil {
- return nil, ErrControllerNotAvailable
- }
-
- _ = controller.Open(client.conf)
- return controller, nil
-}
-
-func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- coordinator := client.cachedCoordinator(consumerGroup)
-
- if coordinator == nil {
- if err := client.RefreshCoordinator(consumerGroup); err != nil {
- return nil, err
- }
- coordinator = client.cachedCoordinator(consumerGroup)
- }
-
- if coordinator == nil {
- return nil, ErrConsumerCoordinatorNotAvailable
- }
-
- _ = coordinator.Open(client.conf)
- return coordinator, nil
-}
-
-func (client *client) RefreshCoordinator(consumerGroup string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
- response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
- if err != nil {
- return err
- }
-
- client.lock.Lock()
- defer client.lock.Unlock()
- client.registerBroker(response.Coordinator)
- client.coordinators[consumerGroup] = response.Coordinator.ID()
- return nil
-}
-
-// private broker management helpers
-
-// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
-// in the brokers map. It returns the broker that is registered, which may be the provided broker,
-// or a previously registered Broker instance. You must hold the write lock before calling this function.
-func (client *client) registerBroker(broker *Broker) {
- if client.brokers[broker.ID()] == nil {
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
- } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
- safeAsyncClose(client.brokers[broker.ID()])
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
- }
-}
-
-// deregisterBroker removes a broker from the seedsBroker list, and if it's
-// not the seedbroker, removes it from brokers map completely.
-func (client *client) deregisterBroker(broker *Broker) {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
- client.deadSeeds = append(client.deadSeeds, broker)
- client.seedBrokers = client.seedBrokers[1:]
- } else {
- // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
- // but we really shouldn't have to; once that loop is made better this case can be
- // removed, and the function generally can be renamed from `deregisterBroker` to
- // `nextSeedBroker` or something
- Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
- delete(client.brokers, broker.ID())
- }
-}
-
-func (client *client) resurrectDeadBrokers() {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
- client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
- client.deadSeeds = nil
-}
-
-func (client *client) any() *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- if len(client.seedBrokers) > 0 {
- _ = client.seedBrokers[0].Open(client.conf)
- return client.seedBrokers[0]
- }
-
- // not guaranteed to be random *or* deterministic
- for _, broker := range client.brokers {
- _ = broker.Open(client.conf)
- return broker
- }
-
- return nil
-}
-
-// private caching/lazy metadata helpers
-
-type partitionType int
-
-const (
- allPartitions partitionType = iota
- writablePartitions
- // If you add any more types, update the partition cache in update()
-
- // Ensure this is the last partition type value
- maxPartitionIndex
-)
-
-func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions := client.metadata[topic]
- if partitions != nil {
- return partitions[partitionID]
- }
-
- return nil
-}
-
-func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions, exists := client.cachedPartitionsResults[topic]
-
- if !exists {
- return nil
- }
- return partitions[partitionSet]
-}
-
-func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
- partitions := client.metadata[topic]
-
- if partitions == nil {
- return nil
- }
-
- ret := make([]int32, 0, len(partitions))
- for _, partition := range partitions {
- if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
- continue
- }
- ret = append(ret, partition.ID)
- }
-
- sort.Sort(int32Slice(ret))
- return ret
-}
-
-func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions := client.metadata[topic]
- if partitions != nil {
- metadata, ok := partitions[partitionID]
- if ok {
- if metadata.Err == ErrLeaderNotAvailable {
- return nil, ErrLeaderNotAvailable
- }
- b := client.brokers[metadata.Leader]
- if b == nil {
- return nil, ErrLeaderNotAvailable
- }
- _ = b.Open(client.conf)
- return b, nil
- }
- }
-
- return nil, ErrUnknownTopicOrPartition
-}
-
-func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
- broker, err := client.Leader(topic, partitionID)
- if err != nil {
- return -1, err
- }
-
- request := &OffsetRequest{}
- if client.conf.Version.IsAtLeast(V0_10_1_0) {
- request.Version = 1
- }
- request.AddBlock(topic, partitionID, time, 1)
-
- response, err := broker.GetAvailableOffsets(request)
- if err != nil {
- _ = broker.Close()
- return -1, err
- }
-
- block := response.GetBlock(topic, partitionID)
- if block == nil {
- _ = broker.Close()
- return -1, ErrIncompleteResponse
- }
- if block.Err != ErrNoError {
- return -1, block.Err
- }
- if len(block.Offsets) != 1 {
- return -1, ErrOffsetOutOfRange
- }
-
- return block.Offsets[0], nil
-}
-
-// core metadata update logic
-
-func (client *client) backgroundMetadataUpdater() {
- defer close(client.closed)
-
- if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
- return
- }
-
- ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if err := client.refreshMetadata(); err != nil {
- Logger.Println("Client background metadata update:", err)
- }
- case <-client.closer:
- return
- }
- }
-}
-
-func (client *client) refreshMetadata() error {
- topics := []string{}
-
- if !client.conf.Metadata.Full {
- if specificTopics, err := client.MetadataTopics(); err != nil {
- return err
- } else if len(specificTopics) == 0 {
- return ErrNoTopicsToUpdateMetadata
- } else {
- topics = specificTopics
- }
- }
-
- if err := client.RefreshMetadata(topics...); err != nil {
- return err
- }
-
- return nil
-}
-
-func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
- pastDeadline := func(backoff time.Duration) bool {
- if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
- // we are past the deadline
- return true
- }
- return false
- }
- retry := func(err error) error {
- if attemptsRemaining > 0 {
- backoff := client.computeBackoff(attemptsRemaining)
- if pastDeadline(backoff) {
- Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
- return err
- }
- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
- if backoff > 0 {
- time.Sleep(backoff)
- }
- return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
- }
- return err
- }
-
- broker := client.any()
- for ; broker != nil && !pastDeadline(0); broker = client.any() {
- allowAutoTopicCreation := true
- if len(topics) > 0 {
- Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
- } else {
- allowAutoTopicCreation = false
- Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
- }
-
- req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
- if client.conf.Version.IsAtLeast(V1_0_0_0) {
- req.Version = 5
- } else if client.conf.Version.IsAtLeast(V0_10_0_0) {
- req.Version = 1
- }
- response, err := broker.GetMetadata(req)
- switch err.(type) {
- case nil:
- allKnownMetaData := len(topics) == 0
- // valid response, use it
- shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
- if shouldRetry {
- Logger.Println("client/metadata found some partitions to be leaderless")
- return retry(err) // note: err can be nil
- }
- return err
-
- case PacketEncodingError:
- // didn't even send, return the error
- return err
-
- case KError:
- // if SASL auth error return as this _should_ be a non retryable err for all brokers
- if err.(KError) == ErrSASLAuthenticationFailed {
- Logger.Println("client/metadata failed SASL authentication")
- return err
- }
- // else remove that broker and try again
- Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
- _ = broker.Close()
- client.deregisterBroker(broker)
-
- default:
- // some other error, remove that broker and try again
- Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
- _ = broker.Close()
- client.deregisterBroker(broker)
- }
- }
-
- if broker != nil {
- Logger.Println("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
- return retry(ErrOutOfBrokers)
- }
-
- Logger.Println("client/metadata no available broker to send metadata request to")
- client.resurrectDeadBrokers()
- return retry(ErrOutOfBrokers)
-}
-
-// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
-func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- // For all the brokers we received:
- // - if it is a new ID, save it
- // - if it is an existing ID, but the address we have is stale, discard the old one and save it
- // - otherwise ignore it, replacing our existing one would just bounce the connection
- for _, broker := range data.Brokers {
- client.registerBroker(broker)
- }
-
- client.controllerID = data.ControllerID
-
- if allKnownMetaData {
- client.metadata = make(map[string]map[int32]*PartitionMetadata)
- client.metadataTopics = make(map[string]none)
- client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
- }
- for _, topic := range data.Topics {
- // topics must be added firstly to `metadataTopics` to guarantee that all
- // requested topics must be recorded to keep them trackable for periodically
- // metadata refresh.
- if _, exists := client.metadataTopics[topic.Name]; !exists {
- client.metadataTopics[topic.Name] = none{}
- }
- delete(client.metadata, topic.Name)
- delete(client.cachedPartitionsResults, topic.Name)
-
- switch topic.Err {
- case ErrNoError:
- // no-op
- case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
- err = topic.Err
- continue
- case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
- err = topic.Err
- retry = true
- continue
- case ErrLeaderNotAvailable: // retry, but store partial partition results
- retry = true
- default: // don't retry, don't store partial results
- Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
- err = topic.Err
- continue
- }
-
- client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
- for _, partition := range topic.Partitions {
- client.metadata[topic.Name][partition.ID] = partition
- if partition.Err == ErrLeaderNotAvailable {
- retry = true
- }
- }
-
- var partitionCache [maxPartitionIndex][]int32
- partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
- partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
- client.cachedPartitionsResults[topic.Name] = partitionCache
- }
-
- return
-}
-
-func (client *client) cachedCoordinator(consumerGroup string) *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
- if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
- return client.brokers[coordinatorID]
- }
- return nil
-}
-
-func (client *client) cachedController() *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- return client.brokers[client.controllerID]
-}
-
-func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
- if client.conf.Metadata.Retry.BackoffFunc != nil {
- maxRetries := client.conf.Metadata.Retry.Max
- retries := maxRetries - attemptsRemaining
- return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
- }
- return client.conf.Metadata.Retry.Backoff
-}
-
-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
- retry := func(err error) (*FindCoordinatorResponse, error) {
- if attemptsRemaining > 0 {
- backoff := client.computeBackoff(attemptsRemaining)
- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
- time.Sleep(backoff)
- return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
- }
- return nil, err
- }
-
- for broker := client.any(); broker != nil; broker = client.any() {
- Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
-
- request := new(FindCoordinatorRequest)
- request.CoordinatorKey = consumerGroup
- request.CoordinatorType = CoordinatorGroup
-
- response, err := broker.FindCoordinator(request)
-
- if err != nil {
- Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
-
- switch err.(type) {
- case PacketEncodingError:
- return nil, err
- default:
- _ = broker.Close()
- client.deregisterBroker(broker)
- continue
- }
- }
-
- switch response.Err {
- case ErrNoError:
- Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
- return response, nil
-
- case ErrConsumerCoordinatorNotAvailable:
- Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
-
- // This is very ugly, but this scenario will only happen once per cluster.
- // The __consumer_offsets topic only has to be created one time.
- // The number of partitions not configurable, but partition 0 should always exist.
- if _, err := client.Leader("__consumer_offsets", 0); err != nil {
- Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
- time.Sleep(2 * time.Second)
- }
-
- return retry(ErrConsumerCoordinatorNotAvailable)
- default:
- return nil, response.Err
- }
- }
-
- Logger.Println("client/coordinator no available broker to send consumer metadata request to")
- client.resurrectDeadBrokers()
- return retry(ErrOutOfBrokers)
-}
-
-// nopCloserClient embeds an existing Client, but disables
-// the Close method (yet all other methods pass
-// through unchanged). This is for use in larger structs
-// where it is undesirable to close the client that was
-// passed in by the caller.
-type nopCloserClient struct {
- Client
-}
-
-// Close intercepts and purposely does not call the underlying
-// client's Close() method.
-func (ncc *nopCloserClient) Close() error {
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/compress.go b/vendor/gopkg.in/Shopify/sarama.v1/compress.go
deleted file mode 100644
index 94b716e..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/compress.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "sync"
-
- "github.com/eapache/go-xerial-snappy"
- "github.com/pierrec/lz4"
-)
-
-var (
- lz4WriterPool = sync.Pool{
- New: func() interface{} {
- return lz4.NewWriter(nil)
- },
- }
-
- gzipWriterPool = sync.Pool{
- New: func() interface{} {
- return gzip.NewWriter(nil)
- },
- }
-)
-
-func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
- switch cc {
- case CompressionNone:
- return data, nil
- case CompressionGZIP:
- var (
- err error
- buf bytes.Buffer
- writer *gzip.Writer
- )
- if level != CompressionLevelDefault {
- writer, err = gzip.NewWriterLevel(&buf, level)
- if err != nil {
- return nil, err
- }
- } else {
- writer = gzipWriterPool.Get().(*gzip.Writer)
- defer gzipWriterPool.Put(writer)
- writer.Reset(&buf)
- }
- if _, err := writer.Write(data); err != nil {
- return nil, err
- }
- if err := writer.Close(); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
- case CompressionSnappy:
- return snappy.Encode(data), nil
- case CompressionLZ4:
- writer := lz4WriterPool.Get().(*lz4.Writer)
- defer lz4WriterPool.Put(writer)
-
- var buf bytes.Buffer
- writer.Reset(&buf)
-
- if _, err := writer.Write(data); err != nil {
- return nil, err
- }
- if err := writer.Close(); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
- case CompressionZSTD:
- return zstdCompressLevel(nil, data, level)
- default:
- return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/config.go b/vendor/gopkg.in/Shopify/sarama.v1/config.go
deleted file mode 100644
index e2e6513..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/config.go
+++ /dev/null
@@ -1,695 +0,0 @@
-package sarama
-
-import (
- "compress/gzip"
- "crypto/tls"
- "fmt"
- "io/ioutil"
- "net"
- "regexp"
- "time"
-
- "github.com/rcrowley/go-metrics"
- "golang.org/x/net/proxy"
-)
-
-const defaultClientID = "sarama"
-
-var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
-
-// Config is used to pass multiple configuration options to Sarama's constructors.
-type Config struct {
- // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
- Admin struct {
- // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
- // including topics, brokers, configurations and ACLs (defaults to 3 seconds).
- Timeout time.Duration
- }
-
- // Net is the namespace for network-level properties used by the Broker, and
- // shared by the Client/Producer/Consumer.
- Net struct {
- // How many outstanding requests a connection is allowed to have before
- // sending on it blocks (default 5).
- MaxOpenRequests int
-
- // All three of the below configurations are similar to the
- // `socket.timeout.ms` setting in JVM kafka. All of them default
- // to 30 seconds.
- DialTimeout time.Duration // How long to wait for the initial connection.
- ReadTimeout time.Duration // How long to wait for a response.
- WriteTimeout time.Duration // How long to wait for a transmit.
-
- TLS struct {
- // Whether or not to use TLS when connecting to the broker
- // (defaults to false).
- Enable bool
- // The TLS configuration to use for secure connections if
- // enabled (defaults to nil).
- Config *tls.Config
- }
-
- // SASL based authentication with broker. While there are multiple SASL authentication methods
- // the current implementation is limited to plaintext (SASL/PLAIN) authentication
- SASL struct {
- // Whether or not to use SASL authentication when connecting to the broker
- // (defaults to false).
- Enable bool
- // SASLMechanism is the name of the enabled SASL mechanism.
- // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
- Mechanism SASLMechanism
- // Version is the SASL Protocol Version to use
- // Kafka > 1.x should use V1, except on Azure EventHub which use V0
- Version int16
- // Whether or not to send the Kafka SASL handshake first if enabled
- // (defaults to true). You should only set this to false if you're using
- // a non-Kafka SASL proxy.
- Handshake bool
- //username and password for SASL/PLAIN or SASL/SCRAM authentication
- User string
- Password string
- // authz id used for SASL/SCRAM authentication
- SCRAMAuthzID string
- // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
- // client used to perform the SCRAM exchange with the server.
- SCRAMClientGeneratorFunc func() SCRAMClient
- // TokenProvider is a user-defined callback for generating
- // access tokens for SASL/OAUTHBEARER auth. See the
- // AccessTokenProvider interface docs for proper implementation
- // guidelines.
- TokenProvider AccessTokenProvider
-
- GSSAPI GSSAPIConfig
- }
-
- // KeepAlive specifies the keep-alive period for an active network connection.
- // If zero, keep-alives are disabled. (default is 0: disabled).
- KeepAlive time.Duration
-
- // LocalAddr is the local address to use when dialing an
- // address. The address must be of a compatible type for the
- // network being dialed.
- // If nil, a local address is automatically chosen.
- LocalAddr net.Addr
-
- Proxy struct {
- // Whether or not to use proxy when connecting to the broker
- // (defaults to false).
- Enable bool
- // The proxy dialer to use enabled (defaults to nil).
- Dialer proxy.Dialer
- }
- }
-
- // Metadata is the namespace for metadata management properties used by the
- // Client, and shared by the Producer/Consumer.
- Metadata struct {
- Retry struct {
- // The total number of times to retry a metadata request when the
- // cluster is in the middle of a leader election (default 3).
- Max int
- // How long to wait for leader election to occur before retrying
- // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
- Backoff time.Duration
- // Called to compute backoff time dynamically. Useful for implementing
- // more sophisticated backoff strategies. This takes precedence over
- // `Backoff` if set.
- BackoffFunc func(retries, maxRetries int) time.Duration
- }
- // How frequently to refresh the cluster metadata in the background.
- // Defaults to 10 minutes. Set to 0 to disable. Similar to
- // `topic.metadata.refresh.interval.ms` in the JVM version.
- RefreshFrequency time.Duration
-
- // Whether to maintain a full set of metadata for all topics, or just
- // the minimal set that has been necessary so far. The full set is simpler
- // and usually more convenient, but can take up a substantial amount of
- // memory if you have many topics and partitions. Defaults to true.
- Full bool
-
- // How long to wait for a successful metadata response.
- // Disabled by default which means a metadata request against an unreachable
- // cluster (all brokers are unreachable or unresponsive) can take up to
- // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
- // to fail.
- Timeout time.Duration
- }
-
- // Producer is the namespace for configuration related to producing messages,
- // used by the Producer.
- Producer struct {
- // The maximum permitted size of a message (defaults to 1000000). Should be
- // set equal to or smaller than the broker's `message.max.bytes`.
- MaxMessageBytes int
- // The level of acknowledgement reliability needed from the broker (defaults
- // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
- // JVM producer.
- RequiredAcks RequiredAcks
- // The maximum duration the broker will wait the receipt of the number of
- // RequiredAcks (defaults to 10 seconds). This is only relevant when
- // RequiredAcks is set to WaitForAll or a number > 1. Only supports
- // millisecond resolution, nanoseconds will be truncated. Equivalent to
- // the JVM producer's `request.timeout.ms` setting.
- Timeout time.Duration
- // The type of compression to use on messages (defaults to no compression).
- // Similar to `compression.codec` setting of the JVM producer.
- Compression CompressionCodec
- // The level of compression to use on messages. The meaning depends
- // on the actual compression type used and defaults to default compression
- // level for the codec.
- CompressionLevel int
- // Generates partitioners for choosing the partition to send messages to
- // (defaults to hashing the message key). Similar to the `partitioner.class`
- // setting for the JVM producer.
- Partitioner PartitionerConstructor
- // If enabled, the producer will ensure that exactly one copy of each message is
- // written.
- Idempotent bool
-
- // Return specifies what channels will be populated. If they are set to true,
- // you must read from the respective channels to prevent deadlock. If,
- // however, this config is used to create a `SyncProducer`, both must be set
- // to true and you shall not read from the channels since the producer does
- // this internally.
- Return struct {
- // If enabled, successfully delivered messages will be returned on the
- // Successes channel (default disabled).
- Successes bool
-
- // If enabled, messages that failed to deliver will be returned on the
- // Errors channel, including error (default enabled).
- Errors bool
- }
-
- // The following config options control how often messages are batched up and
- // sent to the broker. By default, messages are sent as fast as possible, and
- // all messages received while the current batch is in-flight are placed
- // into the subsequent batch.
- Flush struct {
- // The best-effort number of bytes needed to trigger a flush. Use the
- // global sarama.MaxRequestSize to set a hard upper limit.
- Bytes int
- // The best-effort number of messages needed to trigger a flush. Use
- // `MaxMessages` to set a hard upper limit.
- Messages int
- // The best-effort frequency of flushes. Equivalent to
- // `queue.buffering.max.ms` setting of JVM producer.
- Frequency time.Duration
- // The maximum number of messages the producer will send in a single
- // broker request. Defaults to 0 for unlimited. Similar to
- // `queue.buffering.max.messages` in the JVM producer.
- MaxMessages int
- }
-
- Retry struct {
- // The total number of times to retry sending a message (default 3).
- // Similar to the `message.send.max.retries` setting of the JVM producer.
- Max int
- // How long to wait for the cluster to settle between retries
- // (default 100ms). Similar to the `retry.backoff.ms` setting of the
- // JVM producer.
- Backoff time.Duration
- // Called to compute backoff time dynamically. Useful for implementing
- // more sophisticated backoff strategies. This takes precedence over
- // `Backoff` if set.
- BackoffFunc func(retries, maxRetries int) time.Duration
- }
- }
-
- // Consumer is the namespace for configuration related to consuming messages,
- // used by the Consumer.
- Consumer struct {
-
- // Group is the namespace for configuring consumer group.
- Group struct {
- Session struct {
- // The timeout used to detect consumer failures when using Kafka's group management facility.
- // The consumer sends periodic heartbeats to indicate its liveness to the broker.
- // If no heartbeats are received by the broker before the expiration of this session timeout,
- // then the broker will remove this consumer from the group and initiate a rebalance.
- // Note that the value must be in the allowable range as configured in the broker configuration
- // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
- Timeout time.Duration
- }
- Heartbeat struct {
- // The expected time between heartbeats to the consumer coordinator when using Kafka's group
- // management facilities. Heartbeats are used to ensure that the consumer's session stays active and
- // to facilitate rebalancing when new consumers join or leave the group.
- // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
- // higher than 1/3 of that value.
- // It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
- Interval time.Duration
- }
- Rebalance struct {
- // Strategy for allocating topic partitions to members (default BalanceStrategyRange)
- Strategy BalanceStrategy
- // The maximum allowed time for each worker to join the group once a rebalance has begun.
- // This is basically a limit on the amount of time needed for all tasks to flush any pending
- // data and commit offsets. If the timeout is exceeded, then the worker will be removed from
- // the group, which will cause offset commit failures (default 60s).
- Timeout time.Duration
-
- Retry struct {
- // When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
- // the load to assign partitions to each consumer. If the set of consumers changes while
- // this assignment is taking place the rebalance will fail and retry. This setting controls
- // the maximum number of attempts before giving up (default 4).
- Max int
- // Backoff time between retries during rebalance (default 2s)
- Backoff time.Duration
- }
- }
- Member struct {
- // Custom metadata to include when joining the group. The user data for all joined members
- // can be retrieved by sending a DescribeGroupRequest to the broker that is the
- // coordinator for the group.
- UserData []byte
- }
- }
-
- Retry struct {
- // How long to wait after a failing to read from a partition before
- // trying again (default 2s).
- Backoff time.Duration
- // Called to compute backoff time dynamically. Useful for implementing
- // more sophisticated backoff strategies. This takes precedence over
- // `Backoff` if set.
- BackoffFunc func(retries int) time.Duration
- }
-
- // Fetch is the namespace for controlling how many bytes are retrieved by any
- // given request.
- Fetch struct {
- // The minimum number of message bytes to fetch in a request - the broker
- // will wait until at least this many are available. The default is 1,
- // as 0 causes the consumer to spin when no messages are available.
- // Equivalent to the JVM's `fetch.min.bytes`.
- Min int32
- // The default number of message bytes to fetch from the broker in each
- // request (default 1MB). This should be larger than the majority of
- // your messages, or else the consumer will spend a lot of time
- // negotiating sizes and not actually consuming. Similar to the JVM's
- // `fetch.message.max.bytes`.
- Default int32
- // The maximum number of message bytes to fetch from the broker in a
- // single request. Messages larger than this will return
- // ErrMessageTooLarge and will not be consumable, so you must be sure
- // this is at least as large as your largest message. Defaults to 0
- // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
- // global `sarama.MaxResponseSize` still applies.
- Max int32
- }
- // The maximum amount of time the broker will wait for Consumer.Fetch.Min
- // bytes to become available before it returns fewer than that anyways. The
- // default is 250ms, since 0 causes the consumer to spin when no events are
- // available. 100-500ms is a reasonable range for most cases. Kafka only
- // supports precision up to milliseconds; nanoseconds will be truncated.
- // Equivalent to the JVM's `fetch.wait.max.ms`.
- MaxWaitTime time.Duration
-
- // The maximum amount of time the consumer expects a message takes to
- // process for the user. If writing to the Messages channel takes longer
- // than this, that partition will stop fetching more messages until it
- // can proceed again.
- // Note that, since the Messages channel is buffered, the actual grace time is
- // (MaxProcessingTime * ChanneBufferSize). Defaults to 100ms.
- // If a message is not written to the Messages channel between two ticks
- // of the expiryTicker then a timeout is detected.
- // Using a ticker instead of a timer to detect timeouts should typically
- // result in many fewer calls to Timer functions which may result in a
- // significant performance improvement if many messages are being sent
- // and timeouts are infrequent.
- // The disadvantage of using a ticker instead of a timer is that
- // timeouts will be less accurate. That is, the effective timeout could
- // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
- // example, if `MaxProcessingTime` is 100ms then a delay of 180ms
- // between two messages being sent may not be recognized as a timeout.
- MaxProcessingTime time.Duration
-
- // Return specifies what channels will be populated. If they are set to true,
- // you must read from them to prevent deadlock.
- Return struct {
- // If enabled, any errors that occurred while consuming are returned on
- // the Errors channel (default disabled).
- Errors bool
- }
-
- // Offsets specifies configuration for how and when to commit consumed
- // offsets. This currently requires the manual use of an OffsetManager
- // but will eventually be automated.
- Offsets struct {
- // How frequently to commit updated offsets. Defaults to 1s.
- CommitInterval time.Duration
-
- // The initial offset to use if no offset was previously committed.
- // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
- Initial int64
-
- // The retention duration for committed offsets. If zero, disabled
- // (in which case the `offsets.retention.minutes` option on the
- // broker will be used). Kafka only supports precision up to
- // milliseconds; nanoseconds will be truncated. Requires Kafka
- // broker version 0.9.0 or later.
- // (default is 0: disabled).
- Retention time.Duration
-
- Retry struct {
- // The total number of times to retry failing commit
- // requests during OffsetManager shutdown (default 3).
- Max int
- }
- }
-
- // IsolationLevel support 2 mode:
- // - use `ReadUncommitted` (default) to consume and return all messages in message channel
- // - use `ReadCommitted` to hide messages that are part of an aborted transaction
- IsolationLevel IsolationLevel
- }
-
- // A user-provided string sent with every request to the brokers for logging,
- // debugging, and auditing purposes. Defaults to "sarama", but you should
- // probably set it to something specific to your application.
- ClientID string
- // The number of events to buffer in internal and external channels. This
- // permits the producer and consumer to continue processing some messages
- // in the background while user code is working, greatly improving throughput.
- // Defaults to 256.
- ChannelBufferSize int
- // The version of Kafka that Sarama will assume it is running against.
- // Defaults to the oldest supported stable version. Since Kafka provides
- // backwards-compatibility, setting it to a version older than you have
- // will not break anything, although it may prevent you from using the
- // latest features. Setting it to a version greater than you are actually
- // running may lead to random breakage.
- Version KafkaVersion
- // The registry to define metrics into.
- // Defaults to a local registry.
- // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
- // prior to starting Sarama.
- // See Examples on how to use the metrics registry
- MetricRegistry metrics.Registry
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
- c := &Config{}
-
- c.Admin.Timeout = 3 * time.Second
-
- c.Net.MaxOpenRequests = 5
- c.Net.DialTimeout = 30 * time.Second
- c.Net.ReadTimeout = 30 * time.Second
- c.Net.WriteTimeout = 30 * time.Second
- c.Net.SASL.Handshake = true
- c.Net.SASL.Version = SASLHandshakeV0
-
- c.Metadata.Retry.Max = 3
- c.Metadata.Retry.Backoff = 250 * time.Millisecond
- c.Metadata.RefreshFrequency = 10 * time.Minute
- c.Metadata.Full = true
-
- c.Producer.MaxMessageBytes = 1000000
- c.Producer.RequiredAcks = WaitForLocal
- c.Producer.Timeout = 10 * time.Second
- c.Producer.Partitioner = NewHashPartitioner
- c.Producer.Retry.Max = 3
- c.Producer.Retry.Backoff = 100 * time.Millisecond
- c.Producer.Return.Errors = true
- c.Producer.CompressionLevel = CompressionLevelDefault
-
- c.Consumer.Fetch.Min = 1
- c.Consumer.Fetch.Default = 1024 * 1024
- c.Consumer.Retry.Backoff = 2 * time.Second
- c.Consumer.MaxWaitTime = 250 * time.Millisecond
- c.Consumer.MaxProcessingTime = 100 * time.Millisecond
- c.Consumer.Return.Errors = false
- c.Consumer.Offsets.CommitInterval = 1 * time.Second
- c.Consumer.Offsets.Initial = OffsetNewest
- c.Consumer.Offsets.Retry.Max = 3
-
- c.Consumer.Group.Session.Timeout = 10 * time.Second
- c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
- c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
- c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
- c.Consumer.Group.Rebalance.Retry.Max = 4
- c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
-
- c.ClientID = defaultClientID
- c.ChannelBufferSize = 256
- c.Version = MinVersion
- c.MetricRegistry = metrics.NewRegistry()
-
- return c
-}
-
-// Validate checks a Config instance. It will return a
-// ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
- // some configuration values should be warned on but not fail completely, do those first
- if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
- Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
- }
- if !c.Net.SASL.Enable {
- if c.Net.SASL.User != "" {
- Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
- }
- if c.Net.SASL.Password != "" {
- Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
- }
- }
- if c.Producer.RequiredAcks > 1 {
- Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
- }
- if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
- Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
- }
- if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
- Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
- }
- if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
- Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
- }
- if c.Producer.Timeout%time.Millisecond != 0 {
- Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
- }
- if c.Consumer.MaxWaitTime < 100*time.Millisecond {
- Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
- }
- if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
- Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
- Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
- Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
- Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
- Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.ClientID == defaultClientID {
- Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
- }
-
- // validate Net values
- switch {
- case c.Net.MaxOpenRequests <= 0:
- return ConfigurationError("Net.MaxOpenRequests must be > 0")
- case c.Net.DialTimeout <= 0:
- return ConfigurationError("Net.DialTimeout must be > 0")
- case c.Net.ReadTimeout <= 0:
- return ConfigurationError("Net.ReadTimeout must be > 0")
- case c.Net.WriteTimeout <= 0:
- return ConfigurationError("Net.WriteTimeout must be > 0")
- case c.Net.KeepAlive < 0:
- return ConfigurationError("Net.KeepAlive must be >= 0")
- case c.Net.SASL.Enable:
- if c.Net.SASL.Mechanism == "" {
- c.Net.SASL.Mechanism = SASLTypePlaintext
- }
-
- switch c.Net.SASL.Mechanism {
- case SASLTypePlaintext:
- if c.Net.SASL.User == "" {
- return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
- }
- if c.Net.SASL.Password == "" {
- return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
- }
- case SASLTypeOAuth:
- if c.Net.SASL.TokenProvider == nil {
- return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
- }
- case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
- if c.Net.SASL.User == "" {
- return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
- }
- if c.Net.SASL.Password == "" {
- return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
- }
- if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
- return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
- }
- case SASLTypeGSSAPI:
- if c.Net.SASL.GSSAPI.ServiceName == "" {
- return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
- }
-
- if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
- if c.Net.SASL.GSSAPI.Password == "" {
- return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
- "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
- }
- } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
- if c.Net.SASL.GSSAPI.KeyTabPath == "" {
- return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
- " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
- }
- } else {
- return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
- }
- if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
- return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
- }
- if c.Net.SASL.GSSAPI.Username == "" {
- return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
- }
- if c.Net.SASL.GSSAPI.Realm == "" {
- return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
- }
- default:
- msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
- SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
- return ConfigurationError(msg)
- }
- }
-
- // validate the Admin values
- switch {
- case c.Admin.Timeout <= 0:
- return ConfigurationError("Admin.Timeout must be > 0")
- }
-
- // validate the Metadata values
- switch {
- case c.Metadata.Retry.Max < 0:
- return ConfigurationError("Metadata.Retry.Max must be >= 0")
- case c.Metadata.Retry.Backoff < 0:
- return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
- case c.Metadata.RefreshFrequency < 0:
- return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
- }
-
- // validate the Producer values
- switch {
- case c.Producer.MaxMessageBytes <= 0:
- return ConfigurationError("Producer.MaxMessageBytes must be > 0")
- case c.Producer.RequiredAcks < -1:
- return ConfigurationError("Producer.RequiredAcks must be >= -1")
- case c.Producer.Timeout <= 0:
- return ConfigurationError("Producer.Timeout must be > 0")
- case c.Producer.Partitioner == nil:
- return ConfigurationError("Producer.Partitioner must not be nil")
- case c.Producer.Flush.Bytes < 0:
- return ConfigurationError("Producer.Flush.Bytes must be >= 0")
- case c.Producer.Flush.Messages < 0:
- return ConfigurationError("Producer.Flush.Messages must be >= 0")
- case c.Producer.Flush.Frequency < 0:
- return ConfigurationError("Producer.Flush.Frequency must be >= 0")
- case c.Producer.Flush.MaxMessages < 0:
- return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
- case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
- return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
- case c.Producer.Retry.Max < 0:
- return ConfigurationError("Producer.Retry.Max must be >= 0")
- case c.Producer.Retry.Backoff < 0:
- return ConfigurationError("Producer.Retry.Backoff must be >= 0")
- }
-
- if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
- return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
- }
-
- if c.Producer.Compression == CompressionGZIP {
- if c.Producer.CompressionLevel != CompressionLevelDefault {
- if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
- return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
- }
- }
- }
-
- if c.Producer.Idempotent {
- if !c.Version.IsAtLeast(V0_11_0_0) {
- return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
- }
- if c.Producer.Retry.Max == 0 {
- return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
- }
- if c.Producer.RequiredAcks != WaitForAll {
- return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
- }
- if c.Net.MaxOpenRequests > 1 {
- return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
- }
- }
-
- // validate the Consumer values
- switch {
- case c.Consumer.Fetch.Min <= 0:
- return ConfigurationError("Consumer.Fetch.Min must be > 0")
- case c.Consumer.Fetch.Default <= 0:
- return ConfigurationError("Consumer.Fetch.Default must be > 0")
- case c.Consumer.Fetch.Max < 0:
- return ConfigurationError("Consumer.Fetch.Max must be >= 0")
- case c.Consumer.MaxWaitTime < 1*time.Millisecond:
- return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
- case c.Consumer.MaxProcessingTime <= 0:
- return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
- case c.Consumer.Retry.Backoff < 0:
- return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
- case c.Consumer.Offsets.CommitInterval <= 0:
- return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
- case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
- return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
- case c.Consumer.Offsets.Retry.Max < 0:
- return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
- case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
- return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
- }
-
- // validate IsolationLevel
- if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
- return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
- }
-
- // validate the Consumer Group values
- switch {
- case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
- return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
- case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
- return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
- case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
- return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
- case c.Consumer.Group.Rebalance.Strategy == nil:
- return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
- case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
- return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
- case c.Consumer.Group.Rebalance.Retry.Max < 0:
- return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
- case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
- return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
- }
-
- // validate misc shared values
- switch {
- case c.ChannelBufferSize < 0:
- return ConfigurationError("ChannelBufferSize must be >= 0")
- case !validID.MatchString(c.ClientID):
- return ConfigurationError("ClientID is invalid")
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/config_resource_type.go b/vendor/gopkg.in/Shopify/sarama.v1/config_resource_type.go
deleted file mode 100644
index 5399d75..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/config_resource_type.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package sarama
-
-//ConfigResourceType is a type for config resource
-type ConfigResourceType int8
-
-// Taken from :
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
-
-const (
- //UnknownResource constant type
- UnknownResource ConfigResourceType = iota
- //AnyResource constant type
- AnyResource
- //TopicResource constant type
- TopicResource
- //GroupResource constant type
- GroupResource
- //ClusterResource constant type
- ClusterResource
- //BrokerResource constant type
- BrokerResource
-)
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer.go
deleted file mode 100644
index 72c4d7c..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/consumer.go
+++ /dev/null
@@ -1,896 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// ConsumerMessage encapsulates a Kafka message returned by the consumer.
-type ConsumerMessage struct {
- Headers []*RecordHeader // only set if kafka is version 0.11+
- Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
- BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
-
- Key, Value []byte
- Topic string
- Partition int32
- Offset int64
-}
-
-// ConsumerError is what is provided to the user when an error occurs.
-// It wraps an error and includes the topic and partition.
-type ConsumerError struct {
- Topic string
- Partition int32
- Err error
-}
-
-func (ce ConsumerError) Error() string {
- return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
-}
-
-// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
-// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
-// when stopping.
-type ConsumerErrors []*ConsumerError
-
-func (ce ConsumerErrors) Error() string {
- return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
-}
-
-// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
-// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
-// scope.
-type Consumer interface {
- // Topics returns the set of available topics as retrieved from the cluster
- // metadata. This method is the same as Client.Topics(), and is provided for
- // convenience.
- Topics() ([]string, error)
-
- // Partitions returns the sorted list of all partition IDs for the given topic.
- // This method is the same as Client.Partitions(), and is provided for convenience.
- Partitions(topic string) ([]int32, error)
-
- // ConsumePartition creates a PartitionConsumer on the given topic/partition with
- // the given offset. It will return an error if this Consumer is already consuming
- // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
- // or OffsetOldest
- ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
-
- // HighWaterMarks returns the current high water marks for each topic and partition.
- // Consistency between partitions is not guaranteed since high water marks are updated separately.
- HighWaterMarks() map[string]map[int32]int64
-
- // Close shuts down the consumer. It must be called after all child
- // PartitionConsumers have already been closed.
- Close() error
-}
-
-type consumer struct {
- conf *Config
- children map[string]map[int32]*partitionConsumer
- brokerConsumers map[*Broker]*brokerConsumer
- client Client
- lock sync.Mutex
-}
-
-// NewConsumer creates a new consumer using the given broker addresses and configuration.
-func NewConsumer(addrs []string, config *Config) (Consumer, error) {
- client, err := NewClient(addrs, config)
- if err != nil {
- return nil, err
- }
- return newConsumer(client)
-}
-
-// NewConsumerFromClient creates a new consumer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-func NewConsumerFromClient(client Client) (Consumer, error) {
- // For clients passed in by the client, ensure we don't
- // call Close() on it.
- cli := &nopCloserClient{client}
- return newConsumer(cli)
-}
-
-func newConsumer(client Client) (Consumer, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- c := &consumer{
- client: client,
- conf: client.Config(),
- children: make(map[string]map[int32]*partitionConsumer),
- brokerConsumers: make(map[*Broker]*brokerConsumer),
- }
-
- return c, nil
-}
-
-func (c *consumer) Close() error {
- return c.client.Close()
-}
-
-func (c *consumer) Topics() ([]string, error) {
- return c.client.Topics()
-}
-
-func (c *consumer) Partitions(topic string) ([]int32, error) {
- return c.client.Partitions(topic)
-}
-
-func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
- child := &partitionConsumer{
- consumer: c,
- conf: c.conf,
- topic: topic,
- partition: partition,
- messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
- errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
- feeder: make(chan *FetchResponse, 1),
- trigger: make(chan none, 1),
- dying: make(chan none),
- fetchSize: c.conf.Consumer.Fetch.Default,
- }
-
- if err := child.chooseStartingOffset(offset); err != nil {
- return nil, err
- }
-
- var leader *Broker
- var err error
- if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
- return nil, err
- }
-
- if err := c.addChild(child); err != nil {
- return nil, err
- }
-
- go withRecover(child.dispatcher)
- go withRecover(child.responseFeeder)
-
- child.broker = c.refBrokerConsumer(leader)
- child.broker.input <- child
-
- return child, nil
-}
-
-func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- hwms := make(map[string]map[int32]int64)
- for topic, p := range c.children {
- hwm := make(map[int32]int64, len(p))
- for partition, pc := range p {
- hwm[partition] = pc.HighWaterMarkOffset()
- }
- hwms[topic] = hwm
- }
-
- return hwms
-}
-
-func (c *consumer) addChild(child *partitionConsumer) error {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- topicChildren := c.children[child.topic]
- if topicChildren == nil {
- topicChildren = make(map[int32]*partitionConsumer)
- c.children[child.topic] = topicChildren
- }
-
- if topicChildren[child.partition] != nil {
- return ConfigurationError("That topic/partition is already being consumed")
- }
-
- topicChildren[child.partition] = child
- return nil
-}
-
-func (c *consumer) removeChild(child *partitionConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- delete(c.children[child.topic], child.partition)
-}
-
-func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- bc := c.brokerConsumers[broker]
- if bc == nil {
- bc = c.newBrokerConsumer(broker)
- c.brokerConsumers[broker] = bc
- }
-
- bc.refs++
-
- return bc
-}
-
-func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- brokerWorker.refs--
-
- if brokerWorker.refs == 0 {
- close(brokerWorker.input)
- if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
- delete(c.brokerConsumers, brokerWorker.broker)
- }
- }
-}
-
-func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- delete(c.brokerConsumers, brokerWorker.broker)
-}
-
-// PartitionConsumer
-
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
-// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
-// of scope.
-//
-// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
-// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
-// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
-// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
-// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
-// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
-// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
-//
-// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
-// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
-// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
-// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
-// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
-type PartitionConsumer interface {
- // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
- // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
- // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
- // this before calling Close on the underlying client.
- AsyncClose()
-
- // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
- // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
- // the Messages channel when this function is called, you will be competing with Close for messages; consider
- // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
- // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
- Close() error
-
- // Messages returns the read channel for the messages that are returned by
- // the broker.
- Messages() <-chan *ConsumerMessage
-
- // Errors returns a read channel of errors that occurred during consuming, if
- // enabled. By default, errors are logged and not returned over this channel.
- // If you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan *ConsumerError
-
- // HighWaterMarkOffset returns the high water mark offset of the partition,
- // i.e. the offset that will be used for the next message that will be produced.
- // You can use this to determine how far behind the processing is.
- HighWaterMarkOffset() int64
-}
-
-type partitionConsumer struct {
- highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-
- consumer *consumer
- conf *Config
- broker *brokerConsumer
- messages chan *ConsumerMessage
- errors chan *ConsumerError
- feeder chan *FetchResponse
-
- trigger, dying chan none
- closeOnce sync.Once
- topic string
- partition int32
- responseResult error
- fetchSize int32
- offset int64
- retries int32
-}
-
-var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
-
-func (child *partitionConsumer) sendError(err error) {
- cErr := &ConsumerError{
- Topic: child.topic,
- Partition: child.partition,
- Err: err,
- }
-
- if child.conf.Consumer.Return.Errors {
- child.errors <- cErr
- } else {
- Logger.Println(cErr)
- }
-}
-
-func (child *partitionConsumer) computeBackoff() time.Duration {
- if child.conf.Consumer.Retry.BackoffFunc != nil {
- retries := atomic.AddInt32(&child.retries, 1)
- return child.conf.Consumer.Retry.BackoffFunc(int(retries))
- }
- return child.conf.Consumer.Retry.Backoff
-}
-
-func (child *partitionConsumer) dispatcher() {
- for range child.trigger {
- select {
- case <-child.dying:
- close(child.trigger)
- case <-time.After(child.computeBackoff()):
- if child.broker != nil {
- child.consumer.unrefBrokerConsumer(child.broker)
- child.broker = nil
- }
-
- Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
- if err := child.dispatch(); err != nil {
- child.sendError(err)
- child.trigger <- none{}
- }
- }
- }
-
- if child.broker != nil {
- child.consumer.unrefBrokerConsumer(child.broker)
- }
- child.consumer.removeChild(child)
- close(child.feeder)
-}
-
-func (child *partitionConsumer) dispatch() error {
- if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
- return err
- }
-
- var leader *Broker
- var err error
- if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
- return err
- }
-
- child.broker = child.consumer.refBrokerConsumer(leader)
-
- child.broker.input <- child
-
- return nil
-}
-
-func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
- newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
- if err != nil {
- return err
- }
- oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
- if err != nil {
- return err
- }
-
- switch {
- case offset == OffsetNewest:
- child.offset = newestOffset
- case offset == OffsetOldest:
- child.offset = oldestOffset
- case offset >= oldestOffset && offset <= newestOffset:
- child.offset = offset
- default:
- return ErrOffsetOutOfRange
- }
-
- return nil
-}
-
-func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
- return child.messages
-}
-
-func (child *partitionConsumer) Errors() <-chan *ConsumerError {
- return child.errors
-}
-
-func (child *partitionConsumer) AsyncClose() {
- // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
- // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
- // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
- // also just close itself)
- child.closeOnce.Do(func() {
- close(child.dying)
- })
-}
-
-func (child *partitionConsumer) Close() error {
- child.AsyncClose()
-
- var errors ConsumerErrors
- for err := range child.errors {
- errors = append(errors, err)
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (child *partitionConsumer) HighWaterMarkOffset() int64 {
- return atomic.LoadInt64(&child.highWaterMarkOffset)
-}
-
-func (child *partitionConsumer) responseFeeder() {
- var msgs []*ConsumerMessage
- expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
- firstAttempt := true
-
-feederLoop:
- for response := range child.feeder {
- msgs, child.responseResult = child.parseResponse(response)
-
- if child.responseResult == nil {
- atomic.StoreInt32(&child.retries, 0)
- }
-
- for i, msg := range msgs {
- messageSelect:
- select {
- case <-child.dying:
- child.broker.acks.Done()
- continue feederLoop
- case child.messages <- msg:
- firstAttempt = true
- case <-expiryTicker.C:
- if !firstAttempt {
- child.responseResult = errTimedOut
- child.broker.acks.Done()
- remainingLoop:
- for _, msg = range msgs[i:] {
- select {
- case child.messages <- msg:
- case <-child.dying:
- break remainingLoop
- }
- }
- child.broker.input <- child
- continue feederLoop
- } else {
- // current message has not been sent, return to select
- // statement
- firstAttempt = false
- goto messageSelect
- }
- }
- }
-
- child.broker.acks.Done()
- }
-
- expiryTicker.Stop()
- close(child.messages)
- close(child.errors)
-}
-
-func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
- var messages []*ConsumerMessage
- for _, msgBlock := range msgSet.Messages {
- for _, msg := range msgBlock.Messages() {
- offset := msg.Offset
- timestamp := msg.Msg.Timestamp
- if msg.Msg.Version >= 1 {
- baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
- offset += baseOffset
- if msg.Msg.LogAppendTime {
- timestamp = msgBlock.Msg.Timestamp
- }
- }
- if offset < child.offset {
- continue
- }
- messages = append(messages, &ConsumerMessage{
- Topic: child.topic,
- Partition: child.partition,
- Key: msg.Msg.Key,
- Value: msg.Msg.Value,
- Offset: offset,
- Timestamp: timestamp,
- BlockTimestamp: msgBlock.Msg.Timestamp,
- })
- child.offset = offset + 1
- }
- }
- if len(messages) == 0 {
- child.offset++
- }
- return messages, nil
-}
-
-func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
- messages := make([]*ConsumerMessage, 0, len(batch.Records))
-
- for _, rec := range batch.Records {
- offset := batch.FirstOffset + rec.OffsetDelta
- if offset < child.offset {
- continue
- }
- timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
- if batch.LogAppendTime {
- timestamp = batch.MaxTimestamp
- }
- messages = append(messages, &ConsumerMessage{
- Topic: child.topic,
- Partition: child.partition,
- Key: rec.Key,
- Value: rec.Value,
- Offset: offset,
- Timestamp: timestamp,
- Headers: rec.Headers,
- })
- child.offset = offset + 1
- }
- if len(messages) == 0 {
- child.offset++
- }
- return messages, nil
-}
-
-func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
- var (
- metricRegistry = child.conf.MetricRegistry
- consumerBatchSizeMetric metrics.Histogram
- )
-
- if metricRegistry != nil {
- consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
- }
-
- // If request was throttled and empty we log and return without error
- if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
- Logger.Printf(
- "consumer/broker/%d FetchResponse throttled %v\n",
- child.broker.broker.ID(), response.ThrottleTime)
- return nil, nil
- }
-
- block := response.GetBlock(child.topic, child.partition)
- if block == nil {
- return nil, ErrIncompleteResponse
- }
-
- if block.Err != ErrNoError {
- return nil, block.Err
- }
-
- nRecs, err := block.numRecords()
- if err != nil {
- return nil, err
- }
-
- consumerBatchSizeMetric.Update(int64(nRecs))
-
- if nRecs == 0 {
- partialTrailingMessage, err := block.isPartial()
- if err != nil {
- return nil, err
- }
- // We got no messages. If we got a trailing one then we need to ask for more data.
- // Otherwise we just poll again and wait for one to be produced...
- if partialTrailingMessage {
- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
- // we can't ask for more data, we've hit the configured limit
- child.sendError(ErrMessageTooLarge)
- child.offset++ // skip this one so we can keep processing future messages
- } else {
- child.fetchSize *= 2
- // check int32 overflow
- if child.fetchSize < 0 {
- child.fetchSize = math.MaxInt32
- }
- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
- child.fetchSize = child.conf.Consumer.Fetch.Max
- }
- }
- }
-
- return nil, nil
- }
-
- // we got messages, reset our fetch size in case it was increased for a previous request
- child.fetchSize = child.conf.Consumer.Fetch.Default
- atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
-
- // abortedProducerIDs contains producerID which message should be ignored as uncommitted
- // - producerID are added when the partitionConsumer iterate over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
- // - producerID are removed when partitionConsumer iterate over an aborted controlRecord, meaning the aborted transaction for this producer is over
- abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
- abortedTransactions := block.getAbortedTransactions()
-
- messages := []*ConsumerMessage{}
- for _, records := range block.RecordsSet {
- switch records.recordsType {
- case legacyRecords:
- messageSetMessages, err := child.parseMessages(records.MsgSet)
- if err != nil {
- return nil, err
- }
-
- messages = append(messages, messageSetMessages...)
- case defaultRecords:
- // Consume remaining abortedTransaction up to last offset of current batch
- for _, txn := range abortedTransactions {
- if txn.FirstOffset > records.RecordBatch.LastOffset() {
- break
- }
- abortedProducerIDs[txn.ProducerID] = struct{}{}
- // Pop abortedTransactions so that we never add it again
- abortedTransactions = abortedTransactions[1:]
- }
-
- recordBatchMessages, err := child.parseRecords(records.RecordBatch)
- if err != nil {
- return nil, err
- }
-
- // Parse and commit offset but do not expose messages that are:
- // - control records
- // - part of an aborted transaction when set to `ReadCommitted`
-
- // control record
- isControl, err := records.isControl()
- if err != nil {
- // I don't know why there is this continue in case of error to begin with
- // Safe bet is to ignore control messages if ReadUncommitted
- // and block on them in case of error and ReadCommitted
- if child.conf.Consumer.IsolationLevel == ReadCommitted {
- return nil, err
- }
- continue
- }
- if isControl {
- controlRecord, err := records.getControlRecord()
- if err != nil {
- return nil, err
- }
-
- if controlRecord.Type == ControlRecordAbort {
- delete(abortedProducerIDs, records.RecordBatch.ProducerID)
- }
- continue
- }
-
- // filter aborted transactions
- if child.conf.Consumer.IsolationLevel == ReadCommitted {
- _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
- if records.RecordBatch.IsTransactional && isAborted {
- continue
- }
- }
-
- messages = append(messages, recordBatchMessages...)
- default:
- return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
- }
- }
-
- return messages, nil
-}
-
-type brokerConsumer struct {
- consumer *consumer
- broker *Broker
- input chan *partitionConsumer
- newSubscriptions chan []*partitionConsumer
- subscriptions map[*partitionConsumer]none
- wait chan none
- acks sync.WaitGroup
- refs int
-}
-
-func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
- bc := &brokerConsumer{
- consumer: c,
- broker: broker,
- input: make(chan *partitionConsumer),
- newSubscriptions: make(chan []*partitionConsumer),
- wait: make(chan none),
- subscriptions: make(map[*partitionConsumer]none),
- refs: 0,
- }
-
- go withRecover(bc.subscriptionManager)
- go withRecover(bc.subscriptionConsumer)
-
- return bc
-}
-
-// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
-// goroutine is in the middle of a network request) and batches it up. The main worker goroutine picks
-// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
-// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions is available,
-// so the main goroutine can block waiting for work if it has none.
-func (bc *brokerConsumer) subscriptionManager() {
- var buffer []*partitionConsumer
-
- for {
- if len(buffer) > 0 {
- select {
- case event, ok := <-bc.input:
- if !ok {
- goto done
- }
- buffer = append(buffer, event)
- case bc.newSubscriptions <- buffer:
- buffer = nil
- case bc.wait <- none{}:
- }
- } else {
- select {
- case event, ok := <-bc.input:
- if !ok {
- goto done
- }
- buffer = append(buffer, event)
- case bc.newSubscriptions <- nil:
- }
- }
- }
-
-done:
- close(bc.wait)
- if len(buffer) > 0 {
- bc.newSubscriptions <- buffer
- }
- close(bc.newSubscriptions)
-}
-
-//subscriptionConsumer ensures we will get nil right away if no new subscriptions is available
-func (bc *brokerConsumer) subscriptionConsumer() {
- <-bc.wait // wait for our first piece of work
-
- for newSubscriptions := range bc.newSubscriptions {
- bc.updateSubscriptions(newSubscriptions)
-
- if len(bc.subscriptions) == 0 {
- // We're about to be shut down or we're about to receive more subscriptions.
- // Either way, the signal just hasn't propagated to our goroutine yet.
- <-bc.wait
- continue
- }
-
- response, err := bc.fetchNewMessages()
-
- if err != nil {
- Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
- bc.abort(err)
- return
- }
-
- bc.acks.Add(len(bc.subscriptions))
- for child := range bc.subscriptions {
- child.feeder <- response
- }
- bc.acks.Wait()
- bc.handleResponses()
- }
-}
-
-func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
- for _, child := range newSubscriptions {
- bc.subscriptions[child] = none{}
- Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
- }
-
- for child := range bc.subscriptions {
- select {
- case <-child.dying:
- Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
- close(child.trigger)
- delete(bc.subscriptions, child)
- default:
- // no-op
- }
- }
-}
-
-//handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
-func (bc *brokerConsumer) handleResponses() {
- for child := range bc.subscriptions {
- result := child.responseResult
- child.responseResult = nil
-
- switch result {
- case nil:
- // no-op
- case errTimedOut:
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
- bc.broker.ID(), child.topic, child.partition)
- delete(bc.subscriptions, child)
- case ErrOffsetOutOfRange:
- // there's no point in retrying this it will just fail the same way again
- // shut it down and force the user to choose what to do
- child.sendError(result)
- Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
- close(child.trigger)
- delete(bc.subscriptions, child)
- case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
- // not an error, but does need redispatching
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
- bc.broker.ID(), child.topic, child.partition, result)
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- default:
- // dunno, tell the user and try redispatching
- child.sendError(result)
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
- bc.broker.ID(), child.topic, child.partition, result)
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- }
- }
-}
-
-func (bc *brokerConsumer) abort(err error) {
- bc.consumer.abandonBrokerConsumer(bc)
- _ = bc.broker.Close() // we don't care about the error this might return, we already have one
-
- for child := range bc.subscriptions {
- child.sendError(err)
- child.trigger <- none{}
- }
-
- for newSubscriptions := range bc.newSubscriptions {
- if len(newSubscriptions) == 0 {
- <-bc.wait
- continue
- }
- for _, child := range newSubscriptions {
- child.sendError(err)
- child.trigger <- none{}
- }
- }
-}
-
-func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
- request := &FetchRequest{
- MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
- MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
- request.Version = 1
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
- request.Version = 2
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
- request.Version = 3
- request.MaxBytes = MaxResponseSize
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 4
- request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
- }
-
- for child := range bc.subscriptions {
- request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
- }
-
- return bc.broker.Fetch(request)
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_group.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_group.go
deleted file mode 100644
index 8de9513..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/consumer_group.go
+++ /dev/null
@@ -1,801 +0,0 @@
-package sarama
-
-import (
- "context"
- "errors"
- "fmt"
- "sort"
- "sync"
- "time"
-)
-
-// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
-var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
-
-// ConsumerGroup is responsible for dividing up processing of topics and partitions
-// over a collection of processes (the members of the consumer group).
-type ConsumerGroup interface {
- // Consume joins a cluster of consumers for a given list of topics and
- // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
- //
- // The life-cycle of a session is represented by the following steps:
- //
- // 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
- // and is assigned their "fair share" of partitions, aka 'claims'.
- // 2. Before processing starts, the handler's Setup() hook is called to notify the user
- // of the claims and allow any necessary preparation or alteration of state.
- // 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
- // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
- // from concurrent reads/writes.
- // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
- // parent context is cancelled or when a server-side rebalance cycle is initiated.
- // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
- // to allow the user to perform any final tasks before a rebalance.
- // 6. Finally, marked offsets are committed one last time before claims are released.
- //
- // Please note, that once a rebalance is triggered, sessions must be completed within
- // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
- // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
- // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
- // commit failures.
- Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
-
- // Errors returns a read channel of errors that occurred during the consumer life-cycle.
- // By default, errors are logged and not returned over this channel.
- // If you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan error
-
- // Close stops the ConsumerGroup and detaches any running sessions. It is required to call
- // this function before the object passes out of scope, as it will otherwise leak memory.
- Close() error
-}
-
-type consumerGroup struct {
- client Client
-
- config *Config
- consumer Consumer
- groupID string
- memberID string
- errors chan error
-
- lock sync.Mutex
- closed chan none
- closeOnce sync.Once
-}
-
-// NewConsumerGroup creates a new consumer group the given broker addresses and configuration.
-func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
- client, err := NewClient(addrs, config)
- if err != nil {
- return nil, err
- }
-
- c, err := newConsumerGroup(groupID, client)
- if err != nil {
- _ = client.Close()
- }
- return c, err
-}
-
-// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-// PLEASE NOTE: consumer groups can only re-use but not share clients.
-func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
- // For clients passed in by the client, ensure we don't
- // call Close() on it.
- cli := &nopCloserClient{client}
- return newConsumerGroup(groupID, cli)
-}
-
-func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
- config := client.Config()
- if !config.Version.IsAtLeast(V0_10_2_0) {
- return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
- }
-
- consumer, err := NewConsumerFromClient(client)
- if err != nil {
- return nil, err
- }
-
- return &consumerGroup{
- client: client,
- consumer: consumer,
- config: config,
- groupID: groupID,
- errors: make(chan error, config.ChannelBufferSize),
- closed: make(chan none),
- }, nil
-}
-
-// Errors implements ConsumerGroup.
-func (c *consumerGroup) Errors() <-chan error { return c.errors }
-
-// Close implements ConsumerGroup.
-func (c *consumerGroup) Close() (err error) {
- c.closeOnce.Do(func() {
- close(c.closed)
-
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // leave group
- if e := c.leave(); e != nil {
- err = e
- }
-
- // drain errors
- go func() {
- close(c.errors)
- }()
- for e := range c.errors {
- err = e
- }
-
- if e := c.client.Close(); e != nil {
- err = e
- }
- })
- return
-}
-
-// Consume implements ConsumerGroup.
-func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
- // Ensure group is not closed
- select {
- case <-c.closed:
- return ErrClosedConsumerGroup
- default:
- }
-
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Quick exit when no topics are provided
- if len(topics) == 0 {
- return fmt.Errorf("no topics provided")
- }
-
- // Refresh metadata for requested topics
- if err := c.client.RefreshMetadata(topics...); err != nil {
- return err
- }
-
- // Init session
- sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
- if err == ErrClosedClient {
- return ErrClosedConsumerGroup
- } else if err != nil {
- return err
- }
-
- // Wait for session exit signal
- <-sess.ctx.Done()
-
- // Gracefully release session claims
- return sess.release(true)
-}
-
-func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
- select {
- case <-c.closed:
- return nil, ErrClosedConsumerGroup
- case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
- }
-
- if refreshCoordinator {
- err := c.client.RefreshCoordinator(c.groupID)
- if err != nil {
- return c.retryNewSession(ctx, topics, handler, retries, true)
- }
- }
-
- return c.newSession(ctx, topics, handler, retries-1)
-}
-
-func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
- coordinator, err := c.client.Coordinator(c.groupID)
- if err != nil {
- if retries <= 0 {
- return nil, err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, true)
- }
-
- // Join consumer group
- join, err := c.joinGroupRequest(coordinator, topics)
- if err != nil {
- _ = coordinator.Close()
- return nil, err
- }
- switch join.Err {
- case ErrNoError:
- c.memberID = join.MemberId
- case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
- c.memberID = ""
- return c.newSession(ctx, topics, handler, retries)
- case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
- if retries <= 0 {
- return nil, join.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, true)
- case ErrRebalanceInProgress: // retry after backoff
- if retries <= 0 {
- return nil, join.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, false)
- default:
- return nil, join.Err
- }
-
- // Prepare distribution plan if we joined as the leader
- var plan BalanceStrategyPlan
- if join.LeaderId == join.MemberId {
- members, err := join.GetMembers()
- if err != nil {
- return nil, err
- }
-
- plan, err = c.balance(members)
- if err != nil {
- return nil, err
- }
- }
-
- // Sync consumer group
- sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
- if err != nil {
- _ = coordinator.Close()
- return nil, err
- }
- switch sync.Err {
- case ErrNoError:
- case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
- c.memberID = ""
- return c.newSession(ctx, topics, handler, retries)
- case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
- if retries <= 0 {
- return nil, sync.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, true)
- case ErrRebalanceInProgress: // retry after backoff
- if retries <= 0 {
- return nil, sync.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, false)
- default:
- return nil, sync.Err
- }
-
- // Retrieve and sort claims
- var claims map[string][]int32
- if len(sync.MemberAssignment) > 0 {
- members, err := sync.GetMemberAssignment()
- if err != nil {
- return nil, err
- }
- claims = members.Topics
-
- for _, partitions := range claims {
- sort.Sort(int32Slice(partitions))
- }
- }
-
- return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
-}
-
-func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
- req := &JoinGroupRequest{
- GroupId: c.groupID,
- MemberId: c.memberID,
- SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
- ProtocolType: "consumer",
- }
- if c.config.Version.IsAtLeast(V0_10_1_0) {
- req.Version = 1
- req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
- }
-
- meta := &ConsumerGroupMemberMetadata{
- Topics: topics,
- UserData: c.config.Consumer.Group.Member.UserData,
- }
- strategy := c.config.Consumer.Group.Rebalance.Strategy
- if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
- return nil, err
- }
-
- return coordinator.JoinGroup(req)
-}
-
-func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
- req := &SyncGroupRequest{
- GroupId: c.groupID,
- MemberId: c.memberID,
- GenerationId: generationID,
- }
- for memberID, topics := range plan {
- err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{
- Topics: topics,
- })
- if err != nil {
- return nil, err
- }
- }
- return coordinator.SyncGroup(req)
-}
-
-func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
- req := &HeartbeatRequest{
- GroupId: c.groupID,
- MemberId: memberID,
- GenerationId: generationID,
- }
-
- return coordinator.Heartbeat(req)
-}
-
-func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
- topics := make(map[string][]int32)
- for _, meta := range members {
- for _, topic := range meta.Topics {
- topics[topic] = nil
- }
- }
-
- for topic := range topics {
- partitions, err := c.client.Partitions(topic)
- if err != nil {
- return nil, err
- }
- topics[topic] = partitions
- }
-
- strategy := c.config.Consumer.Group.Rebalance.Strategy
- return strategy.Plan(members, topics)
-}
-
-// Leaves the cluster, called by Close, protected by lock.
-func (c *consumerGroup) leave() error {
- if c.memberID == "" {
- return nil
- }
-
- coordinator, err := c.client.Coordinator(c.groupID)
- if err != nil {
- return err
- }
-
- resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
- GroupId: c.groupID,
- MemberId: c.memberID,
- })
- if err != nil {
- _ = coordinator.Close()
- return err
- }
-
- // Unset memberID
- c.memberID = ""
-
- // Check response
- switch resp.Err {
- case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
- return nil
- default:
- return resp.Err
- }
-}
-
-func (c *consumerGroup) handleError(err error, topic string, partition int32) {
- select {
- case <-c.closed:
- return
- default:
- }
-
- if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
- err = &ConsumerError{
- Topic: topic,
- Partition: partition,
- Err: err,
- }
- }
-
- if c.config.Consumer.Return.Errors {
- select {
- case c.errors <- err:
- default:
- }
- } else {
- Logger.Println(err)
- }
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupSession represents a consumer group member session.
-type ConsumerGroupSession interface {
- // Claims returns information about the claimed partitions by topic.
- Claims() map[string][]int32
-
- // MemberID returns the cluster member ID.
- MemberID() string
-
- // GenerationID returns the current generation ID.
- GenerationID() int32
-
- // MarkOffset marks the provided offset, alongside a metadata string
- // that represents the state of the partition consumer at that point in time. The
- // metadata string can be used by another consumer to restore that state, so it
- // can resume consumption.
- //
- // To follow upstream conventions, you are expected to mark the offset of the
- // next message to read, not the last message read. Thus, when calling `MarkOffset`
- // you should typically add one to the offset of the last consumed message.
- //
- // Note: calling MarkOffset does not necessarily commit the offset to the backend
- // store immediately for efficiency reasons, and it may never be committed if
- // your application crashes. This means that you may end up processing the same
- // message twice, and your processing should ideally be idempotent.
- MarkOffset(topic string, partition int32, offset int64, metadata string)
-
- // ResetOffset resets to the provided offset, alongside a metadata string that
- // represents the state of the partition consumer at that point in time. Reset
- // acts as a counterpart to MarkOffset, the difference being that it allows to
- // reset an offset to an earlier or smaller value, where MarkOffset only
- // allows incrementing the offset. cf MarkOffset for more details.
- ResetOffset(topic string, partition int32, offset int64, metadata string)
-
- // MarkMessage marks a message as consumed.
- MarkMessage(msg *ConsumerMessage, metadata string)
-
- // Context returns the session context.
- Context() context.Context
-}
-
-type consumerGroupSession struct {
- parent *consumerGroup
- memberID string
- generationID int32
- handler ConsumerGroupHandler
-
- claims map[string][]int32
- offsets *offsetManager
- ctx context.Context
- cancel func()
-
- waitGroup sync.WaitGroup
- releaseOnce sync.Once
- hbDying, hbDead chan none
-}
-
-func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
- // init offset manager
- offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
- if err != nil {
- return nil, err
- }
-
- // init context
- ctx, cancel := context.WithCancel(ctx)
-
- // init session
- sess := &consumerGroupSession{
- parent: parent,
- memberID: memberID,
- generationID: generationID,
- handler: handler,
- offsets: offsets,
- claims: claims,
- ctx: ctx,
- cancel: cancel,
- hbDying: make(chan none),
- hbDead: make(chan none),
- }
-
- // start heartbeat loop
- go sess.heartbeatLoop()
-
- // create a POM for each claim
- for topic, partitions := range claims {
- for _, partition := range partitions {
- pom, err := offsets.ManagePartition(topic, partition)
- if err != nil {
- _ = sess.release(false)
- return nil, err
- }
-
- // handle POM errors
- go func(topic string, partition int32) {
- for err := range pom.Errors() {
- sess.parent.handleError(err, topic, partition)
- }
- }(topic, partition)
- }
- }
-
- // perform setup
- if err := handler.Setup(sess); err != nil {
- _ = sess.release(true)
- return nil, err
- }
-
- // start consuming
- for topic, partitions := range claims {
- for _, partition := range partitions {
- sess.waitGroup.Add(1)
-
- go func(topic string, partition int32) {
- defer sess.waitGroup.Done()
-
- // cancel the as session as soon as the first
- // goroutine exits
- defer sess.cancel()
-
- // consume a single topic/partition, blocking
- sess.consume(topic, partition)
- }(topic, partition)
- }
- }
- return sess, nil
-}
-
-func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
-func (s *consumerGroupSession) MemberID() string { return s.memberID }
-func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
-
-func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
- if pom := s.offsets.findPOM(topic, partition); pom != nil {
- pom.MarkOffset(offset, metadata)
- }
-}
-
-func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
- if pom := s.offsets.findPOM(topic, partition); pom != nil {
- pom.ResetOffset(offset, metadata)
- }
-}
-
-func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
- s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
-}
-
-func (s *consumerGroupSession) Context() context.Context {
- return s.ctx
-}
-
-func (s *consumerGroupSession) consume(topic string, partition int32) {
- // quick exit if rebalance is due
- select {
- case <-s.ctx.Done():
- return
- case <-s.parent.closed:
- return
- default:
- }
-
- // get next offset
- offset := s.parent.config.Consumer.Offsets.Initial
- if pom := s.offsets.findPOM(topic, partition); pom != nil {
- offset, _ = pom.NextOffset()
- }
-
- // create new claim
- claim, err := newConsumerGroupClaim(s, topic, partition, offset)
- if err != nil {
- s.parent.handleError(err, topic, partition)
- return
- }
-
- // handle errors
- go func() {
- for err := range claim.Errors() {
- s.parent.handleError(err, topic, partition)
- }
- }()
-
- // trigger close when session is done
- go func() {
- select {
- case <-s.ctx.Done():
- case <-s.parent.closed:
- }
- claim.AsyncClose()
- }()
-
- // start processing
- if err := s.handler.ConsumeClaim(s, claim); err != nil {
- s.parent.handleError(err, topic, partition)
- }
-
- // ensure consumer is closed & drained
- claim.AsyncClose()
- for _, err := range claim.waitClosed() {
- s.parent.handleError(err, topic, partition)
- }
-}
-
-func (s *consumerGroupSession) release(withCleanup bool) (err error) {
- // signal release, stop heartbeat
- s.cancel()
-
- // wait for consumers to exit
- s.waitGroup.Wait()
-
- // perform release
- s.releaseOnce.Do(func() {
- if withCleanup {
- if e := s.handler.Cleanup(s); e != nil {
- s.parent.handleError(e, "", -1)
- err = e
- }
- }
-
- if e := s.offsets.Close(); e != nil {
- err = e
- }
-
- close(s.hbDying)
- <-s.hbDead
- })
-
- return
-}
-
-func (s *consumerGroupSession) heartbeatLoop() {
- defer close(s.hbDead)
- defer s.cancel() // trigger the end of the session on exit
-
- pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
- defer pause.Stop()
-
- retries := s.parent.config.Metadata.Retry.Max
- for {
- coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
- if err != nil {
- if retries <= 0 {
- s.parent.handleError(err, "", -1)
- return
- }
-
- select {
- case <-s.hbDying:
- return
- case <-time.After(s.parent.config.Metadata.Retry.Backoff):
- retries--
- }
- continue
- }
-
- resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
- if err != nil {
- _ = coordinator.Close()
-
- if retries <= 0 {
- s.parent.handleError(err, "", -1)
- return
- }
-
- retries--
- continue
- }
-
- switch resp.Err {
- case ErrNoError:
- retries = s.parent.config.Metadata.Retry.Max
- case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
- return
- default:
- s.parent.handleError(err, "", -1)
- return
- }
-
- select {
- case <-pause.C:
- case <-s.hbDying:
- return
- }
- }
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
-// It also provides hooks for your consumer group session life-cycle and allow you to
-// trigger logic before or after the consume loop(s).
-//
-// PLEASE NOTE that handlers are likely be called from several goroutines concurrently,
-// ensure that all state is safely protected against race conditions.
-type ConsumerGroupHandler interface {
- // Setup is run at the beginning of a new session, before ConsumeClaim.
- Setup(ConsumerGroupSession) error
-
- // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
- // but before the offsets are committed for the very last time.
- Cleanup(ConsumerGroupSession) error
-
- // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
- // Once the Messages() channel is closed, the Handler must finish its processing
- // loop and exit.
- ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
-}
-
-// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
-type ConsumerGroupClaim interface {
- // Topic returns the consumed topic name.
- Topic() string
-
- // Partition returns the consumed partition.
- Partition() int32
-
- // InitialOffset returns the initial offset that was used as a starting point for this claim.
- InitialOffset() int64
-
- // HighWaterMarkOffset returns the high water mark offset of the partition,
- // i.e. the offset that will be used for the next message that will be produced.
- // You can use this to determine how far behind the processing is.
- HighWaterMarkOffset() int64
-
- // Messages returns the read channel for the messages that are returned by
- // the broker. The messages channel will be closed when a new rebalance cycle
- // is due. You must finish processing and mark offsets within
- // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
- // re-assigned to another group member.
- Messages() <-chan *ConsumerMessage
-}
-
-type consumerGroupClaim struct {
- topic string
- partition int32
- offset int64
- PartitionConsumer
-}
-
-func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
- pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
- if err == ErrOffsetOutOfRange {
- offset = sess.parent.config.Consumer.Offsets.Initial
- pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
- }
- if err != nil {
- return nil, err
- }
-
- go func() {
- for err := range pcm.Errors() {
- sess.parent.handleError(err, topic, partition)
- }
- }()
-
- return &consumerGroupClaim{
- topic: topic,
- partition: partition,
- offset: offset,
- PartitionConsumer: pcm,
- }, nil
-}
-
-func (c *consumerGroupClaim) Topic() string { return c.topic }
-func (c *consumerGroupClaim) Partition() int32 { return c.partition }
-func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
-
-// Drains messages and errors, ensures the claim is fully closed.
-func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
- go func() {
- for range c.Messages() {
- }
- }()
-
- for err := range c.Errors() {
- errs = append(errs, err)
- }
- return
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go
deleted file mode 100644
index 2d02cc3..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/consumer_group_members.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-//ConsumerGroupMemberMetadata holds the metadata for consumer group
-type ConsumerGroupMemberMetadata struct {
- Version int16
- Topics []string
- UserData []byte
-}
-
-func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
- pe.putInt16(m.Version)
-
- if err := pe.putStringArray(m.Topics); err != nil {
- return err
- }
-
- if err := pe.putBytes(m.UserData); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
- if m.Version, err = pd.getInt16(); err != nil {
- return
- }
-
- if m.Topics, err = pd.getStringArray(); err != nil {
- return
- }
-
- if m.UserData, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
-
-//ConsumerGroupMemberAssignment holds the member assignment for a consume group
-type ConsumerGroupMemberAssignment struct {
- Version int16
- Topics map[string][]int32
- UserData []byte
-}
-
-func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
- pe.putInt16(m.Version)
-
- if err := pe.putArrayLength(len(m.Topics)); err != nil {
- return err
- }
-
- for topic, partitions := range m.Topics {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putInt32Array(partitions); err != nil {
- return err
- }
- }
-
- if err := pe.putBytes(m.UserData); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
- if m.Version, err = pd.getInt16(); err != nil {
- return
- }
-
- var topicLen int
- if topicLen, err = pd.getArrayLength(); err != nil {
- return
- }
-
- m.Topics = make(map[string][]int32, topicLen)
- for i := 0; i < topicLen; i++ {
- var topic string
- if topic, err = pd.getString(); err != nil {
- return
- }
- if m.Topics[topic], err = pd.getInt32Array(); err != nil {
- return
- }
- }
-
- if m.UserData, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go
deleted file mode 100644
index a8dcaef..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-//ConsumerMetadataRequest is used for metadata requests
-type ConsumerMetadataRequest struct {
- ConsumerGroup string
-}
-
-func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
- tmp := new(FindCoordinatorRequest)
- tmp.CoordinatorKey = r.ConsumerGroup
- tmp.CoordinatorType = CoordinatorGroup
- return tmp.encode(pe)
-}
-
-func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
- tmp := new(FindCoordinatorRequest)
- if err := tmp.decode(pd, version); err != nil {
- return err
- }
- r.ConsumerGroup = tmp.CoordinatorKey
- return nil
-}
-
-func (r *ConsumerMetadataRequest) key() int16 {
- return 10
-}
-
-func (r *ConsumerMetadataRequest) version() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
- return V0_8_2_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go b/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go
deleted file mode 100644
index f39a871..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/consumer_metadata_response.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package sarama
-
-import (
- "net"
- "strconv"
-)
-
-//ConsumerMetadataResponse holds the response for a consumer group meta data requests
-type ConsumerMetadataResponse struct {
- Err KError
- Coordinator *Broker
- CoordinatorID int32 // deprecated: use Coordinator.ID()
- CoordinatorHost string // deprecated: use Coordinator.Addr()
- CoordinatorPort int32 // deprecated: use Coordinator.Addr()
-}
-
-func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
- tmp := new(FindCoordinatorResponse)
-
- if err := tmp.decode(pd, version); err != nil {
- return err
- }
-
- r.Err = tmp.Err
-
- r.Coordinator = tmp.Coordinator
- if tmp.Coordinator == nil {
- return nil
- }
-
- // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
- // backwards compatibility
- host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
- if err != nil {
- return err
- }
- port, err := strconv.ParseInt(portstr, 10, 32)
- if err != nil {
- return err
- }
- r.CoordinatorID = r.Coordinator.ID()
- r.CoordinatorHost = host
- r.CoordinatorPort = int32(port)
-
- return nil
-}
-
-func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
- if r.Coordinator == nil {
- r.Coordinator = new(Broker)
- r.Coordinator.id = r.CoordinatorID
- r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
- }
-
- tmp := &FindCoordinatorResponse{
- Version: 0,
- Err: r.Err,
- Coordinator: r.Coordinator,
- }
-
- if err := tmp.encode(pe); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *ConsumerMetadataResponse) key() int16 {
- return 10
-}
-
-func (r *ConsumerMetadataResponse) version() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
- return V0_8_2_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/control_record.go b/vendor/gopkg.in/Shopify/sarama.v1/control_record.go
deleted file mode 100644
index 9b75ab5..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/control_record.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package sarama
-
-//ControlRecordType ...
-type ControlRecordType int
-
-const (
- //ControlRecordAbort is a control record for abort
- ControlRecordAbort ControlRecordType = iota
- //ControlRecordCommit is a control record for commit
- ControlRecordCommit
- //ControlRecordUnknown is a control record of unknown type
- ControlRecordUnknown
-)
-
-// Control records are returned as a record by fetchRequest
-// However unlike "normal" records, they mean nothing application wise.
-// They only serve internal logic for supporting transactions.
-type ControlRecord struct {
- Version int16
- CoordinatorEpoch int32
- Type ControlRecordType
-}
-
-func (cr *ControlRecord) decode(key, value packetDecoder) error {
- var err error
- cr.Version, err = value.getInt16()
- if err != nil {
- return err
- }
-
- cr.CoordinatorEpoch, err = value.getInt32()
- if err != nil {
- return err
- }
-
- // There a version for the value part AND the key part. And I have no idea if they are supposed to match or not
- // Either way, all these version can only be 0 for now
- cr.Version, err = key.getInt16()
- if err != nil {
- return err
- }
-
- recordType, err := key.getInt16()
- if err != nil {
- return err
- }
-
- switch recordType {
- case 0:
- cr.Type = ControlRecordAbort
- case 1:
- cr.Type = ControlRecordCommit
- default:
- // from JAVA implementation:
- // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
- cr.Type = ControlRecordUnknown
- }
- return nil
-}
-
-func (cr *ControlRecord) encode(key, value packetEncoder) {
- value.putInt16(cr.Version)
- value.putInt32(cr.CoordinatorEpoch)
- key.putInt16(cr.Version)
-
- switch cr.Type {
- case ControlRecordAbort:
- key.putInt16(0)
- case ControlRecordCommit:
- key.putInt16(1)
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go b/vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go
deleted file mode 100644
index 38189a3..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/crc32_field.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "fmt"
- "hash/crc32"
- "sync"
-)
-
-type crcPolynomial int8
-
-const (
- crcIEEE crcPolynomial = iota
- crcCastagnoli
-)
-
-var crc32FieldPool = sync.Pool{}
-
-func acquireCrc32Field(polynomial crcPolynomial) *crc32Field {
- val := crc32FieldPool.Get()
- if val != nil {
- c := val.(*crc32Field)
- c.polynomial = polynomial
- return c
- }
- return newCRC32Field(polynomial)
-}
-
-func releaseCrc32Field(c *crc32Field) {
- crc32FieldPool.Put(c)
-}
-
-var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
-
-// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
-type crc32Field struct {
- startOffset int
- polynomial crcPolynomial
-}
-
-func (c *crc32Field) saveOffset(in int) {
- c.startOffset = in
-}
-
-func (c *crc32Field) reserveLength() int {
- return 4
-}
-
-func newCRC32Field(polynomial crcPolynomial) *crc32Field {
- return &crc32Field{polynomial: polynomial}
-}
-
-func (c *crc32Field) run(curOffset int, buf []byte) error {
- crc, err := c.crc(curOffset, buf)
- if err != nil {
- return err
- }
- binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
- return nil
-}
-
-func (c *crc32Field) check(curOffset int, buf []byte) error {
- crc, err := c.crc(curOffset, buf)
- if err != nil {
- return err
- }
-
- expected := binary.BigEndian.Uint32(buf[c.startOffset:])
- if crc != expected {
- return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
- }
-
- return nil
-}
-func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
- var tab *crc32.Table
- switch c.polynomial {
- case crcIEEE:
- tab = crc32.IEEETable
- case crcCastagnoli:
- tab = castagnoliTable
- default:
- return 0, PacketDecodingError{"invalid CRC type"}
- }
- return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/create_partitions_request.go b/vendor/gopkg.in/Shopify/sarama.v1/create_partitions_request.go
deleted file mode 100644
index af321e9..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/create_partitions_request.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package sarama
-
-import "time"
-
-type CreatePartitionsRequest struct {
- TopicPartitions map[string]*TopicPartition
- Timeout time.Duration
- ValidateOnly bool
-}
-
-func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
- return err
- }
-
- for topic, partition := range c.TopicPartitions {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := partition.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putInt32(int32(c.Timeout / time.Millisecond))
-
- pe.putBool(c.ValidateOnly)
-
- return nil
-}
-
-func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- c.TopicPartitions = make(map[string]*TopicPartition, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicPartitions[topic] = new(TopicPartition)
- if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.Timeout = time.Duration(timeout) * time.Millisecond
-
- if c.ValidateOnly, err = pd.getBool(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *CreatePartitionsRequest) key() int16 {
- return 37
-}
-
-func (r *CreatePartitionsRequest) version() int16 {
- return 0
-}
-
-func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
-
-type TopicPartition struct {
- Count int32
- Assignment [][]int32
-}
-
-func (t *TopicPartition) encode(pe packetEncoder) error {
- pe.putInt32(t.Count)
-
- if len(t.Assignment) == 0 {
- pe.putInt32(-1)
- return nil
- }
-
- if err := pe.putArrayLength(len(t.Assignment)); err != nil {
- return err
- }
-
- for _, assign := range t.Assignment {
- if err := pe.putInt32Array(assign); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
- if t.Count, err = pd.getInt32(); err != nil {
- return err
- }
-
- n, err := pd.getInt32()
- if err != nil {
- return err
- }
- if n <= 0 {
- return nil
- }
- t.Assignment = make([][]int32, n)
-
- for i := 0; i < int(n); i++ {
- if t.Assignment[i], err = pd.getInt32Array(); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/create_partitions_response.go b/vendor/gopkg.in/Shopify/sarama.v1/create_partitions_response.go
deleted file mode 100644
index bb18204..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/create_partitions_response.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-type CreatePartitionsResponse struct {
- ThrottleTime time.Duration
- TopicPartitionErrors map[string]*TopicPartitionError
-}
-
-func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
- if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
- return err
- }
-
- for topic, partitionError := range c.TopicPartitionErrors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := partitionError.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicPartitionErrors[topic] = new(TopicPartitionError)
- if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *CreatePartitionsResponse) key() int16 {
- return 37
-}
-
-func (r *CreatePartitionsResponse) version() int16 {
- return 0
-}
-
-func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
-
-type TopicPartitionError struct {
- Err KError
- ErrMsg *string
-}
-
-func (t *TopicPartitionError) Error() string {
- text := t.Err.Error()
- if t.ErrMsg != nil {
- text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
- }
- return text
-}
-
-func (t *TopicPartitionError) encode(pe packetEncoder) error {
- pe.putInt16(int16(t.Err))
-
- if err := pe.putNullableString(t.ErrMsg); err != nil {
- return err
- }
-
- return nil
-}
-
-func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- t.Err = KError(kerr)
-
- if t.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/create_topics_request.go b/vendor/gopkg.in/Shopify/sarama.v1/create_topics_request.go
deleted file mode 100644
index 709c0a4..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/create_topics_request.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type CreateTopicsRequest struct {
- Version int16
-
- TopicDetails map[string]*TopicDetail
- Timeout time.Duration
- ValidateOnly bool
-}
-
-func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
- return err
- }
- for topic, detail := range c.TopicDetails {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := detail.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putInt32(int32(c.Timeout / time.Millisecond))
-
- if c.Version >= 1 {
- pe.putBool(c.ValidateOnly)
- }
-
- return nil
-}
-
-func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.TopicDetails = make(map[string]*TopicDetail, n)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicDetails[topic] = new(TopicDetail)
- if err = c.TopicDetails[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.Timeout = time.Duration(timeout) * time.Millisecond
-
- if version >= 1 {
- c.ValidateOnly, err = pd.getBool()
- if err != nil {
- return err
- }
-
- c.Version = version
- }
-
- return nil
-}
-
-func (c *CreateTopicsRequest) key() int16 {
- return 19
-}
-
-func (c *CreateTopicsRequest) version() int16 {
- return c.Version
-}
-
-func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
- switch c.Version {
- case 2:
- return V1_0_0_0
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
-
-type TopicDetail struct {
- NumPartitions int32
- ReplicationFactor int16
- ReplicaAssignment map[int32][]int32
- ConfigEntries map[string]*string
-}
-
-func (t *TopicDetail) encode(pe packetEncoder) error {
- pe.putInt32(t.NumPartitions)
- pe.putInt16(t.ReplicationFactor)
-
- if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
- return err
- }
- for partition, assignment := range t.ReplicaAssignment {
- pe.putInt32(partition)
- if err := pe.putInt32Array(assignment); err != nil {
- return err
- }
- }
-
- if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
- return err
- }
- for configKey, configValue := range t.ConfigEntries {
- if err := pe.putString(configKey); err != nil {
- return err
- }
- if err := pe.putNullableString(configValue); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
- if t.NumPartitions, err = pd.getInt32(); err != nil {
- return err
- }
- if t.ReplicationFactor, err = pd.getInt16(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.ReplicaAssignment = make(map[int32][]int32, n)
- for i := 0; i < n; i++ {
- replica, err := pd.getInt32()
- if err != nil {
- return err
- }
- if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
- return err
- }
- }
- }
-
- n, err = pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.ConfigEntries = make(map[string]*string, n)
- for i := 0; i < n; i++ {
- configKey, err := pd.getString()
- if err != nil {
- return err
- }
- if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/create_topics_response.go b/vendor/gopkg.in/Shopify/sarama.v1/create_topics_response.go
deleted file mode 100644
index a493e02..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/create_topics_response.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-type CreateTopicsResponse struct {
- Version int16
- ThrottleTime time.Duration
- TopicErrors map[string]*TopicError
-}
-
-func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
- if c.Version >= 2 {
- pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
- }
-
- if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
- return err
- }
- for topic, topicError := range c.TopicErrors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := topicError.encode(pe, c.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
- c.Version = version
-
- if version >= 2 {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.TopicErrors = make(map[string]*TopicError, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicErrors[topic] = new(TopicError)
- if err := c.TopicErrors[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateTopicsResponse) key() int16 {
- return 19
-}
-
-func (c *CreateTopicsResponse) version() int16 {
- return c.Version
-}
-
-func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
- switch c.Version {
- case 2:
- return V1_0_0_0
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
-
-type TopicError struct {
- Err KError
- ErrMsg *string
-}
-
-func (t *TopicError) Error() string {
- text := t.Err.Error()
- if t.ErrMsg != nil {
- text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
- }
- return text
-}
-
-func (t *TopicError) encode(pe packetEncoder, version int16) error {
- pe.putInt16(int16(t.Err))
-
- if version >= 1 {
- if err := pe.putNullableString(t.ErrMsg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
- kErr, err := pd.getInt16()
- if err != nil {
- return err
- }
- t.Err = KError(kErr)
-
- if version >= 1 {
- if t.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/decompress.go b/vendor/gopkg.in/Shopify/sarama.v1/decompress.go
deleted file mode 100644
index eaccbfc..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/decompress.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "sync"
-
- "github.com/eapache/go-xerial-snappy"
- "github.com/pierrec/lz4"
-)
-
-var (
- lz4ReaderPool = sync.Pool{
- New: func() interface{} {
- return lz4.NewReader(nil)
- },
- }
-
- gzipReaderPool sync.Pool
-)
-
-func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
- switch cc {
- case CompressionNone:
- return data, nil
- case CompressionGZIP:
- var (
- err error
- reader *gzip.Reader
- readerIntf = gzipReaderPool.Get()
- )
- if readerIntf != nil {
- reader = readerIntf.(*gzip.Reader)
- } else {
- reader, err = gzip.NewReader(bytes.NewReader(data))
- if err != nil {
- return nil, err
- }
- }
-
- defer gzipReaderPool.Put(reader)
-
- if err := reader.Reset(bytes.NewReader(data)); err != nil {
- return nil, err
- }
-
- return ioutil.ReadAll(reader)
- case CompressionSnappy:
- return snappy.Decode(data)
- case CompressionLZ4:
- reader := lz4ReaderPool.Get().(*lz4.Reader)
- defer lz4ReaderPool.Put(reader)
-
- reader.Reset(bytes.NewReader(data))
- return ioutil.ReadAll(reader)
- case CompressionZSTD:
- return zstdDecompress(nil, data)
- default:
- return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/delete_groups_request.go b/vendor/gopkg.in/Shopify/sarama.v1/delete_groups_request.go
deleted file mode 100644
index 305a324..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/delete_groups_request.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package sarama
-
-type DeleteGroupsRequest struct {
- Groups []string
-}
-
-func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
- return pe.putStringArray(r.Groups)
-}
-
-func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Groups, err = pd.getStringArray()
- return
-}
-
-func (r *DeleteGroupsRequest) key() int16 {
- return 42
-}
-
-func (r *DeleteGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
- return V1_1_0_0
-}
-
-func (r *DeleteGroupsRequest) AddGroup(group string) {
- r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/delete_groups_response.go b/vendor/gopkg.in/Shopify/sarama.v1/delete_groups_response.go
deleted file mode 100644
index c067ebb..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/delete_groups_response.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type DeleteGroupsResponse struct {
- ThrottleTime time.Duration
- GroupErrorCodes map[string]KError
-}
-
-func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
- return err
- }
- for groupID, errorCode := range r.GroupErrorCodes {
- if err := pe.putString(groupID); err != nil {
- return err
- }
- pe.putInt16(int16(errorCode))
- }
-
- return nil
-}
-
-func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.GroupErrorCodes = make(map[string]KError, n)
- for i := 0; i < n; i++ {
- groupID, err := pd.getString()
- if err != nil {
- return err
- }
- errorCode, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.GroupErrorCodes[groupID] = KError(errorCode)
- }
-
- return nil
-}
-
-func (r *DeleteGroupsResponse) key() int16 {
- return 42
-}
-
-func (r *DeleteGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
- return V1_1_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/delete_records_request.go b/vendor/gopkg.in/Shopify/sarama.v1/delete_records_request.go
deleted file mode 100644
index 93efafd..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/delete_records_request.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package sarama
-
-import (
- "sort"
- "time"
-)
-
-// request message format is:
-// [topic] timeout(int32)
-// where topic is:
-// name(string) [partition]
-// where partition is:
-// id(int32) offset(int64)
-
-type DeleteRecordsRequest struct {
- Topics map[string]*DeleteRecordsRequestTopic
- Timeout time.Duration
-}
-
-func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(d.Topics)); err != nil {
- return err
- }
- keys := make([]string, 0, len(d.Topics))
- for topic := range d.Topics {
- keys = append(keys, topic)
- }
- sort.Strings(keys)
- for _, topic := range keys {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := d.Topics[topic].encode(pe); err != nil {
- return err
- }
- }
- pe.putInt32(int32(d.Timeout / time.Millisecond))
-
- return nil
-}
-
-func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- details := new(DeleteRecordsRequestTopic)
- if err = details.decode(pd, version); err != nil {
- return err
- }
- d.Topics[topic] = details
- }
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.Timeout = time.Duration(timeout) * time.Millisecond
-
- return nil
-}
-
-func (d *DeleteRecordsRequest) key() int16 {
- return 21
-}
-
-func (d *DeleteRecordsRequest) version() int16 {
- return 0
-}
-
-func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-type DeleteRecordsRequestTopic struct {
- PartitionOffsets map[int32]int64 // partition => offset
-}
-
-func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
- return err
- }
- keys := make([]int32, 0, len(t.PartitionOffsets))
- for partition := range t.PartitionOffsets {
- keys = append(keys, partition)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
- for _, partition := range keys {
- pe.putInt32(partition)
- pe.putInt64(t.PartitionOffsets[partition])
- }
- return nil
-}
-
-func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.PartitionOffsets = make(map[int32]int64, n)
- for i := 0; i < n; i++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- offset, err := pd.getInt64()
- if err != nil {
- return err
- }
- t.PartitionOffsets[partition] = offset
- }
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/delete_records_response.go b/vendor/gopkg.in/Shopify/sarama.v1/delete_records_response.go
deleted file mode 100644
index 733a58b..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/delete_records_response.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package sarama
-
-import (
- "sort"
- "time"
-)
-
-// response message format is:
-// throttleMs(int32) [topic]
-// where topic is:
-// name(string) [partition]
-// where partition is:
-// id(int32) low_watermark(int64) error_code(int16)
-
-type DeleteRecordsResponse struct {
- Version int16
- ThrottleTime time.Duration
- Topics map[string]*DeleteRecordsResponseTopic
-}
-
-func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(d.Topics)); err != nil {
- return err
- }
- keys := make([]string, 0, len(d.Topics))
- for topic := range d.Topics {
- keys = append(keys, topic)
- }
- sort.Strings(keys)
- for _, topic := range keys {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := d.Topics[topic].encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
- d.Version = version
-
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- details := new(DeleteRecordsResponseTopic)
- if err = details.decode(pd, version); err != nil {
- return err
- }
- d.Topics[topic] = details
- }
- }
-
- return nil
-}
-
-func (d *DeleteRecordsResponse) key() int16 {
- return 21
-}
-
-func (d *DeleteRecordsResponse) version() int16 {
- return 0
-}
-
-func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-type DeleteRecordsResponseTopic struct {
- Partitions map[int32]*DeleteRecordsResponsePartition
-}
-
-func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(t.Partitions)); err != nil {
- return err
- }
- keys := make([]int32, 0, len(t.Partitions))
- for partition := range t.Partitions {
- keys = append(keys, partition)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
- for _, partition := range keys {
- pe.putInt32(partition)
- if err := t.Partitions[partition].encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
- for i := 0; i < n; i++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- details := new(DeleteRecordsResponsePartition)
- if err = details.decode(pd, version); err != nil {
- return err
- }
- t.Partitions[partition] = details
- }
- }
-
- return nil
-}
-
-type DeleteRecordsResponsePartition struct {
- LowWatermark int64
- Err KError
-}
-
-func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
- pe.putInt64(t.LowWatermark)
- pe.putInt16(int16(t.Err))
- return nil
-}
-
-func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
- lowWatermark, err := pd.getInt64()
- if err != nil {
- return err
- }
- t.LowWatermark = lowWatermark
-
- kErr, err := pd.getInt16()
- if err != nil {
- return err
- }
- t.Err = KError(kErr)
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/delete_topics_request.go b/vendor/gopkg.in/Shopify/sarama.v1/delete_topics_request.go
deleted file mode 100644
index 911f67d..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/delete_topics_request.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsRequest struct {
- Version int16
- Topics []string
- Timeout time.Duration
-}
-
-func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
- if err := pe.putStringArray(d.Topics); err != nil {
- return err
- }
- pe.putInt32(int32(d.Timeout / time.Millisecond))
-
- return nil
-}
-
-func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
- if d.Topics, err = pd.getStringArray(); err != nil {
- return err
- }
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.Timeout = time.Duration(timeout) * time.Millisecond
- d.Version = version
- return nil
-}
-
-func (d *DeleteTopicsRequest) key() int16 {
- return 20
-}
-
-func (d *DeleteTopicsRequest) version() int16 {
- return d.Version
-}
-
-func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/delete_topics_response.go b/vendor/gopkg.in/Shopify/sarama.v1/delete_topics_response.go
deleted file mode 100644
index 3422546..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/delete_topics_response.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsResponse struct {
- Version int16
- ThrottleTime time.Duration
- TopicErrorCodes map[string]KError
-}
-
-func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
- if d.Version >= 1 {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
- }
-
- if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
- return err
- }
- for topic, errorCode := range d.TopicErrorCodes {
- if err := pe.putString(topic); err != nil {
- return err
- }
- pe.putInt16(int16(errorCode))
- }
-
- return nil
-}
-
-func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
- if version >= 1 {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- d.Version = version
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- d.TopicErrorCodes = make(map[string]KError, n)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- errorCode, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- d.TopicErrorCodes[topic] = KError(errorCode)
- }
-
- return nil
-}
-
-func (d *DeleteTopicsResponse) key() int16 {
- return 20
-}
-
-func (d *DeleteTopicsResponse) version() int16 {
- return d.Version
-}
-
-func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/describe_configs_request.go b/vendor/gopkg.in/Shopify/sarama.v1/describe_configs_request.go
deleted file mode 100644
index ccb587b..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/describe_configs_request.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package sarama
-
-type DescribeConfigsRequest struct {
- Version int16
- Resources []*ConfigResource
- IncludeSynonyms bool
-}
-
-type ConfigResource struct {
- Type ConfigResourceType
- Name string
- ConfigNames []string
-}
-
-func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Resources)); err != nil {
- return err
- }
-
- for _, c := range r.Resources {
- pe.putInt8(int8(c.Type))
- if err := pe.putString(c.Name); err != nil {
- return err
- }
-
- if len(c.ConfigNames) == 0 {
- pe.putInt32(-1)
- continue
- }
- if err := pe.putStringArray(c.ConfigNames); err != nil {
- return err
- }
- }
-
- if r.Version >= 1 {
- pe.putBool(r.IncludeSynonyms)
- }
-
- return nil
-}
-
-func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Resources = make([]*ConfigResource, n)
-
- for i := 0; i < n; i++ {
- r.Resources[i] = &ConfigResource{}
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Resources[i].Type = ConfigResourceType(t)
- name, err := pd.getString()
- if err != nil {
- return err
- }
- r.Resources[i].Name = name
-
- confLength, err := pd.getArrayLength()
-
- if err != nil {
- return err
- }
-
- if confLength == -1 {
- continue
- }
-
- cfnames := make([]string, confLength)
- for i := 0; i < confLength; i++ {
- s, err := pd.getString()
- if err != nil {
- return err
- }
- cfnames[i] = s
- }
- r.Resources[i].ConfigNames = cfnames
- }
- r.Version = version
- if r.Version >= 1 {
- b, err := pd.getBool()
- if err != nil {
- return err
- }
- r.IncludeSynonyms = b
- }
-
- return nil
-}
-
-func (r *DescribeConfigsRequest) key() int16 {
- return 32
-}
-
-func (r *DescribeConfigsRequest) version() int16 {
- return r.Version
-}
-
-func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V1_1_0_0
- case 2:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/describe_configs_response.go b/vendor/gopkg.in/Shopify/sarama.v1/describe_configs_response.go
deleted file mode 100644
index 5737232..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/describe_configs_response.go
+++ /dev/null
@@ -1,320 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-type ConfigSource int8
-
-func (s ConfigSource) String() string {
- switch s {
- case SourceUnknown:
- return "Unknown"
- case SourceTopic:
- return "Topic"
- case SourceDynamicBroker:
- return "DynamicBroker"
- case SourceDynamicDefaultBroker:
- return "DynamicDefaultBroker"
- case SourceStaticBroker:
- return "StaticBroker"
- case SourceDefault:
- return "Default"
- }
- return fmt.Sprintf("Source Invalid: %d", int(s))
-}
-
-const (
- SourceUnknown ConfigSource = iota
- SourceTopic
- SourceDynamicBroker
- SourceDynamicDefaultBroker
- SourceStaticBroker
- SourceDefault
-)
-
-type DescribeConfigsResponse struct {
- Version int16
- ThrottleTime time.Duration
- Resources []*ResourceResponse
-}
-
-type ResourceResponse struct {
- ErrorCode int16
- ErrorMsg string
- Type ConfigResourceType
- Name string
- Configs []*ConfigEntry
-}
-
-type ConfigEntry struct {
- Name string
- Value string
- ReadOnly bool
- Default bool
- Source ConfigSource
- Sensitive bool
- Synonyms []*ConfigSynonym
-}
-
-type ConfigSynonym struct {
- ConfigName string
- ConfigValue string
- Source ConfigSource
-}
-
-func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- if err = pe.putArrayLength(len(r.Resources)); err != nil {
- return err
- }
-
- for _, c := range r.Resources {
- if err = c.encode(pe, r.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Resources = make([]*ResourceResponse, n)
- for i := 0; i < n; i++ {
- rr := &ResourceResponse{}
- if err := rr.decode(pd, version); err != nil {
- return err
- }
- r.Resources[i] = rr
- }
-
- return nil
-}
-
-func (r *DescribeConfigsResponse) key() int16 {
- return 32
-}
-
-func (r *DescribeConfigsResponse) version() int16 {
- return r.Version
-}
-
-func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V1_0_0_0
- case 2:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
-
-func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(r.ErrorCode)
-
- if err = pe.putString(r.ErrorMsg); err != nil {
- return err
- }
-
- pe.putInt8(int8(r.Type))
-
- if err = pe.putString(r.Name); err != nil {
- return err
- }
-
- if err = pe.putArrayLength(len(r.Configs)); err != nil {
- return err
- }
-
- for _, c := range r.Configs {
- if err = c.encode(pe, version); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
- ec, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.ErrorCode = ec
-
- em, err := pd.getString()
- if err != nil {
- return err
- }
- r.ErrorMsg = em
-
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- r.Name = name
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Configs = make([]*ConfigEntry, n)
- for i := 0; i < n; i++ {
- c := &ConfigEntry{}
- if err := c.decode(pd, version); err != nil {
- return err
- }
- r.Configs[i] = c
- }
- return nil
-}
-
-func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
- if err = pe.putString(r.Name); err != nil {
- return err
- }
-
- if err = pe.putString(r.Value); err != nil {
- return err
- }
-
- pe.putBool(r.ReadOnly)
-
- if version <= 0 {
- pe.putBool(r.Default)
- pe.putBool(r.Sensitive)
- } else {
- pe.putInt8(int8(r.Source))
- pe.putBool(r.Sensitive)
-
- if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
- return err
- }
- for _, c := range r.Synonyms {
- if err = c.encode(pe, version); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-//https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
-func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
- if version == 0 {
- r.Source = SourceUnknown
- }
- name, err := pd.getString()
- if err != nil {
- return err
- }
- r.Name = name
-
- value, err := pd.getString()
- if err != nil {
- return err
- }
- r.Value = value
-
- read, err := pd.getBool()
- if err != nil {
- return err
- }
- r.ReadOnly = read
-
- if version == 0 {
- defaultB, err := pd.getBool()
- if err != nil {
- return err
- }
- r.Default = defaultB
- } else {
- source, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Source = ConfigSource(source)
- }
-
- sensitive, err := pd.getBool()
- if err != nil {
- return err
- }
- r.Sensitive = sensitive
-
- if version > 0 {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.Synonyms = make([]*ConfigSynonym, n)
-
- for i := 0; i < n; i++ {
- s := &ConfigSynonym{}
- if err := s.decode(pd, version); err != nil {
- return err
- }
- r.Synonyms[i] = s
- }
-
- }
- return nil
-}
-
-func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
- err = pe.putString(c.ConfigName)
- if err != nil {
- return err
- }
-
- err = pe.putString(c.ConfigValue)
- if err != nil {
- return err
- }
-
- pe.putInt8(int8(c.Source))
-
- return nil
-}
-
-func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
- name, err := pd.getString()
- if err != nil {
- return nil
- }
- c.ConfigName = name
-
- value, err := pd.getString()
- if err != nil {
- return nil
- }
- c.ConfigValue = value
-
- source, err := pd.getInt8()
- if err != nil {
- return nil
- }
- c.Source = ConfigSource(source)
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go b/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go
deleted file mode 100644
index 1fb3567..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_request.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package sarama
-
-type DescribeGroupsRequest struct {
- Groups []string
-}
-
-func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
- return pe.putStringArray(r.Groups)
-}
-
-func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Groups, err = pd.getStringArray()
- return
-}
-
-func (r *DescribeGroupsRequest) key() int16 {
- return 15
-}
-
-func (r *DescribeGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-func (r *DescribeGroupsRequest) AddGroup(group string) {
- r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go b/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go
deleted file mode 100644
index 542b3a9..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/describe_groups_response.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package sarama
-
-type DescribeGroupsResponse struct {
- Groups []*GroupDescription
-}
-
-func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Groups)); err != nil {
- return err
- }
-
- for _, groupDescription := range r.Groups {
- if err := groupDescription.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Groups = make([]*GroupDescription, n)
- for i := 0; i < n; i++ {
- r.Groups[i] = new(GroupDescription)
- if err := r.Groups[i].decode(pd); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeGroupsResponse) key() int16 {
- return 15
-}
-
-func (r *DescribeGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-type GroupDescription struct {
- Err KError
- GroupId string
- State string
- ProtocolType string
- Protocol string
- Members map[string]*GroupMemberDescription
-}
-
-func (gd *GroupDescription) encode(pe packetEncoder) error {
- pe.putInt16(int16(gd.Err))
-
- if err := pe.putString(gd.GroupId); err != nil {
- return err
- }
- if err := pe.putString(gd.State); err != nil {
- return err
- }
- if err := pe.putString(gd.ProtocolType); err != nil {
- return err
- }
- if err := pe.putString(gd.Protocol); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(gd.Members)); err != nil {
- return err
- }
-
- for memberId, groupMemberDescription := range gd.Members {
- if err := pe.putString(memberId); err != nil {
- return err
- }
- if err := groupMemberDescription.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- gd.Err = KError(kerr)
-
- if gd.GroupId, err = pd.getString(); err != nil {
- return
- }
- if gd.State, err = pd.getString(); err != nil {
- return
- }
- if gd.ProtocolType, err = pd.getString(); err != nil {
- return
- }
- if gd.Protocol, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- gd.Members = make(map[string]*GroupMemberDescription)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
-
- gd.Members[memberId] = new(GroupMemberDescription)
- if err := gd.Members[memberId].decode(pd); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type GroupMemberDescription struct {
- ClientId string
- ClientHost string
- MemberMetadata []byte
- MemberAssignment []byte
-}
-
-func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
- if err := pe.putString(gmd.ClientId); err != nil {
- return err
- }
- if err := pe.putString(gmd.ClientHost); err != nil {
- return err
- }
- if err := pe.putBytes(gmd.MemberMetadata); err != nil {
- return err
- }
- if err := pe.putBytes(gmd.MemberAssignment); err != nil {
- return err
- }
-
- return nil
-}
-
-func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
- if gmd.ClientId, err = pd.getString(); err != nil {
- return
- }
- if gmd.ClientHost, err = pd.getString(); err != nil {
- return
- }
- if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
- return
- }
- if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
-
-func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
- assignment := new(ConsumerGroupMemberAssignment)
- err := decode(gmd.MemberAssignment, assignment)
- return assignment, err
-}
-
-func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
- metadata := new(ConsumerGroupMemberMetadata)
- err := decode(gmd.MemberMetadata, metadata)
- return metadata, err
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/dev.yml b/vendor/gopkg.in/Shopify/sarama.v1/dev.yml
deleted file mode 100644
index 3f4d569..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/dev.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: sarama
-
-up:
- - go:
- version: '1.12'
-
-commands:
- test:
- run: make test
- desc: 'run unit tests'
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go b/vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go
deleted file mode 100644
index 7ce3bc0..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/encoder_decoder.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-import (
- "fmt"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// Encoder is the interface that wraps the basic Encode method.
-// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
-type encoder interface {
- encode(pe packetEncoder) error
-}
-
-// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
-func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
- if e == nil {
- return nil, nil
- }
-
- var prepEnc prepEncoder
- var realEnc realEncoder
-
- err := e.encode(&prepEnc)
- if err != nil {
- return nil, err
- }
-
- if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
- return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
- }
-
- realEnc.raw = make([]byte, prepEnc.length)
- realEnc.registry = metricRegistry
- err = e.encode(&realEnc)
- if err != nil {
- return nil, err
- }
-
- return realEnc.raw, nil
-}
-
-// Decoder is the interface that wraps the basic Decode method.
-// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
-type decoder interface {
- decode(pd packetDecoder) error
-}
-
-type versionedDecoder interface {
- decode(pd packetDecoder, version int16) error
-}
-
-// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
-// interpreted using Kafka's encoding rules.
-func decode(buf []byte, in decoder) error {
- if buf == nil {
- return nil
- }
-
- helper := realDecoder{raw: buf}
- err := in.decode(&helper)
- if err != nil {
- return err
- }
-
- if helper.off != len(buf) {
- return PacketDecodingError{"invalid length"}
- }
-
- return nil
-}
-
-func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
- if buf == nil {
- return nil
- }
-
- helper := realDecoder{raw: buf}
- err := in.decode(&helper, version)
- if err != nil {
- return err
- }
-
- if helper.off != len(buf) {
- return PacketDecodingError{"invalid length"}
- }
-
- return nil
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/end_txn_request.go b/vendor/gopkg.in/Shopify/sarama.v1/end_txn_request.go
deleted file mode 100644
index 2cd9b50..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/end_txn_request.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package sarama
-
-type EndTxnRequest struct {
- TransactionalID string
- ProducerID int64
- ProducerEpoch int16
- TransactionResult bool
-}
-
-func (a *EndTxnRequest) encode(pe packetEncoder) error {
- if err := pe.putString(a.TransactionalID); err != nil {
- return err
- }
-
- pe.putInt64(a.ProducerID)
-
- pe.putInt16(a.ProducerEpoch)
-
- pe.putBool(a.TransactionResult)
-
- return nil
-}
-
-func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
- if a.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if a.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if a.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
- if a.TransactionResult, err = pd.getBool(); err != nil {
- return err
- }
- return nil
-}
-
-func (a *EndTxnRequest) key() int16 {
- return 26
-}
-
-func (a *EndTxnRequest) version() int16 {
- return 0
-}
-
-func (a *EndTxnRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/end_txn_response.go b/vendor/gopkg.in/Shopify/sarama.v1/end_txn_response.go
deleted file mode 100644
index 33b27e3..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/end_txn_response.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type EndTxnResponse struct {
- ThrottleTime time.Duration
- Err KError
-}
-
-func (e *EndTxnResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(e.Err))
- return nil
-}
-
-func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- e.Err = KError(kerr)
-
- return nil
-}
-
-func (e *EndTxnResponse) key() int16 {
- return 25
-}
-
-func (e *EndTxnResponse) version() int16 {
- return 0
-}
-
-func (e *EndTxnResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/errors.go b/vendor/gopkg.in/Shopify/sarama.v1/errors.go
deleted file mode 100644
index 8ecb652..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/errors.go
+++ /dev/null
@@ -1,344 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
-)
-
-// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
-
-// ErrClosedClient is the error returned when a method is called on a client that has been closed.
-var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
-
-// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
-
-// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
-
-// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
-var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
-
-// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
-var ErrNotConnected = errors.New("kafka: broker not connected")
-
-// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
-// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
-// of the message set.
-var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
-
-// ErrShuttingDown is returned when a producer receives a message during shutdown.
-var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
-
-// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
-var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
-
-// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
-// a RecordBatch.
-var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
-
-// ErrControllerNotAvailable is returned when server didn't give correct controller id. May be kafka server's version
-// is lower than 0.10.0.0.
-var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
-
-// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
-// the metadata.
-var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
-
-// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
-// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
-type PacketEncodingError struct {
- Info string
-}
-
-func (err PacketEncodingError) Error() string {
- return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
-}
-
-// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
-// This can be a bad CRC or length field, or any other invalid value.
-type PacketDecodingError struct {
- Info string
-}
-
-func (err PacketDecodingError) Error() string {
- return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
-}
-
-// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
-// when the specified configuration is invalid.
-type ConfigurationError string
-
-func (err ConfigurationError) Error() string {
- return "kafka: invalid configuration (" + string(err) + ")"
-}
-
-// KError is the type of error that can be returned directly by the Kafka broker.
-// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
-type KError int16
-
-// Numeric error codes returned by the Kafka server.
-const (
- ErrNoError KError = 0
- ErrUnknown KError = -1
- ErrOffsetOutOfRange KError = 1
- ErrInvalidMessage KError = 2
- ErrUnknownTopicOrPartition KError = 3
- ErrInvalidMessageSize KError = 4
- ErrLeaderNotAvailable KError = 5
- ErrNotLeaderForPartition KError = 6
- ErrRequestTimedOut KError = 7
- ErrBrokerNotAvailable KError = 8
- ErrReplicaNotAvailable KError = 9
- ErrMessageSizeTooLarge KError = 10
- ErrStaleControllerEpochCode KError = 11
- ErrOffsetMetadataTooLarge KError = 12
- ErrNetworkException KError = 13
- ErrOffsetsLoadInProgress KError = 14
- ErrConsumerCoordinatorNotAvailable KError = 15
- ErrNotCoordinatorForConsumer KError = 16
- ErrInvalidTopic KError = 17
- ErrMessageSetSizeTooLarge KError = 18
- ErrNotEnoughReplicas KError = 19
- ErrNotEnoughReplicasAfterAppend KError = 20
- ErrInvalidRequiredAcks KError = 21
- ErrIllegalGeneration KError = 22
- ErrInconsistentGroupProtocol KError = 23
- ErrInvalidGroupId KError = 24
- ErrUnknownMemberId KError = 25
- ErrInvalidSessionTimeout KError = 26
- ErrRebalanceInProgress KError = 27
- ErrInvalidCommitOffsetSize KError = 28
- ErrTopicAuthorizationFailed KError = 29
- ErrGroupAuthorizationFailed KError = 30
- ErrClusterAuthorizationFailed KError = 31
- ErrInvalidTimestamp KError = 32
- ErrUnsupportedSASLMechanism KError = 33
- ErrIllegalSASLState KError = 34
- ErrUnsupportedVersion KError = 35
- ErrTopicAlreadyExists KError = 36
- ErrInvalidPartitions KError = 37
- ErrInvalidReplicationFactor KError = 38
- ErrInvalidReplicaAssignment KError = 39
- ErrInvalidConfig KError = 40
- ErrNotController KError = 41
- ErrInvalidRequest KError = 42
- ErrUnsupportedForMessageFormat KError = 43
- ErrPolicyViolation KError = 44
- ErrOutOfOrderSequenceNumber KError = 45
- ErrDuplicateSequenceNumber KError = 46
- ErrInvalidProducerEpoch KError = 47
- ErrInvalidTxnState KError = 48
- ErrInvalidProducerIDMapping KError = 49
- ErrInvalidTransactionTimeout KError = 50
- ErrConcurrentTransactions KError = 51
- ErrTransactionCoordinatorFenced KError = 52
- ErrTransactionalIDAuthorizationFailed KError = 53
- ErrSecurityDisabled KError = 54
- ErrOperationNotAttempted KError = 55
- ErrKafkaStorageError KError = 56
- ErrLogDirNotFound KError = 57
- ErrSASLAuthenticationFailed KError = 58
- ErrUnknownProducerID KError = 59
- ErrReassignmentInProgress KError = 60
- ErrDelegationTokenAuthDisabled KError = 61
- ErrDelegationTokenNotFound KError = 62
- ErrDelegationTokenOwnerMismatch KError = 63
- ErrDelegationTokenRequestNotAllowed KError = 64
- ErrDelegationTokenAuthorizationFailed KError = 65
- ErrDelegationTokenExpired KError = 66
- ErrInvalidPrincipalType KError = 67
- ErrNonEmptyGroup KError = 68
- ErrGroupIDNotFound KError = 69
- ErrFetchSessionIDNotFound KError = 70
- ErrInvalidFetchSessionEpoch KError = 71
- ErrListenerNotFound KError = 72
- ErrTopicDeletionDisabled KError = 73
- ErrFencedLeaderEpoch KError = 74
- ErrUnknownLeaderEpoch KError = 75
- ErrUnsupportedCompressionType KError = 76
- ErrStaleBrokerEpoch KError = 77
- ErrOffsetNotAvailable KError = 78
- ErrMemberIdRequired KError = 79
- ErrPreferredLeaderNotAvailable KError = 80
- ErrGroupMaxSizeReached KError = 81
-)
-
-func (err KError) Error() string {
- // Error messages stolen/adapted from
- // https://kafka.apache.org/protocol#protocol_error_codes
- switch err {
- case ErrNoError:
- return "kafka server: Not an error, why are you printing me?"
- case ErrUnknown:
- return "kafka server: Unexpected (unknown?) server error."
- case ErrOffsetOutOfRange:
- return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
- case ErrInvalidMessage:
- return "kafka server: Message contents does not match its CRC."
- case ErrUnknownTopicOrPartition:
- return "kafka server: Request was for a topic or partition that does not exist on this broker."
- case ErrInvalidMessageSize:
- return "kafka server: The message has a negative size."
- case ErrLeaderNotAvailable:
- return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
- case ErrNotLeaderForPartition:
- return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
- case ErrRequestTimedOut:
- return "kafka server: Request exceeded the user-specified time limit in the request."
- case ErrBrokerNotAvailable:
- return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
- case ErrReplicaNotAvailable:
- return "kafka server: Replica information not available, one or more brokers are down."
- case ErrMessageSizeTooLarge:
- return "kafka server: Message was too large, server rejected it to avoid allocation error."
- case ErrStaleControllerEpochCode:
- return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
- case ErrOffsetMetadataTooLarge:
- return "kafka server: Specified a string larger than the configured maximum for offset metadata."
- case ErrNetworkException:
- return "kafka server: The server disconnected before a response was received."
- case ErrOffsetsLoadInProgress:
- return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
- case ErrConsumerCoordinatorNotAvailable:
- return "kafka server: Offset's topic has not yet been created."
- case ErrNotCoordinatorForConsumer:
- return "kafka server: Request was for a consumer group that is not coordinated by this broker."
- case ErrInvalidTopic:
- return "kafka server: The request attempted to perform an operation on an invalid topic."
- case ErrMessageSetSizeTooLarge:
- return "kafka server: The request included message batch larger than the configured segment size on the server."
- case ErrNotEnoughReplicas:
- return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
- case ErrNotEnoughReplicasAfterAppend:
- return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
- case ErrInvalidRequiredAcks:
- return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
- case ErrIllegalGeneration:
- return "kafka server: The provided generation id is not the current generation."
- case ErrInconsistentGroupProtocol:
- return "kafka server: The provider group protocol type is incompatible with the other members."
- case ErrInvalidGroupId:
- return "kafka server: The provided group id was empty."
- case ErrUnknownMemberId:
- return "kafka server: The provided member is not known in the current generation."
- case ErrInvalidSessionTimeout:
- return "kafka server: The provided session timeout is outside the allowed range."
- case ErrRebalanceInProgress:
- return "kafka server: A rebalance for the group is in progress. Please re-join the group."
- case ErrInvalidCommitOffsetSize:
- return "kafka server: The provided commit metadata was too large."
- case ErrTopicAuthorizationFailed:
- return "kafka server: The client is not authorized to access this topic."
- case ErrGroupAuthorizationFailed:
- return "kafka server: The client is not authorized to access this group."
- case ErrClusterAuthorizationFailed:
- return "kafka server: The client is not authorized to send this request type."
- case ErrInvalidTimestamp:
- return "kafka server: The timestamp of the message is out of acceptable range."
- case ErrUnsupportedSASLMechanism:
- return "kafka server: The broker does not support the requested SASL mechanism."
- case ErrIllegalSASLState:
- return "kafka server: Request is not valid given the current SASL state."
- case ErrUnsupportedVersion:
- return "kafka server: The version of API is not supported."
- case ErrTopicAlreadyExists:
- return "kafka server: Topic with this name already exists."
- case ErrInvalidPartitions:
- return "kafka server: Number of partitions is invalid."
- case ErrInvalidReplicationFactor:
- return "kafka server: Replication-factor is invalid."
- case ErrInvalidReplicaAssignment:
- return "kafka server: Replica assignment is invalid."
- case ErrInvalidConfig:
- return "kafka server: Configuration is invalid."
- case ErrNotController:
- return "kafka server: This is not the correct controller for this cluster."
- case ErrInvalidRequest:
- return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
- case ErrUnsupportedForMessageFormat:
- return "kafka server: The requested operation is not supported by the message format version."
- case ErrPolicyViolation:
- return "kafka server: Request parameters do not satisfy the configured policy."
- case ErrOutOfOrderSequenceNumber:
- return "kafka server: The broker received an out of order sequence number."
- case ErrDuplicateSequenceNumber:
- return "kafka server: The broker received a duplicate sequence number."
- case ErrInvalidProducerEpoch:
- return "kafka server: Producer attempted an operation with an old epoch."
- case ErrInvalidTxnState:
- return "kafka server: The producer attempted a transactional operation in an invalid state."
- case ErrInvalidProducerIDMapping:
- return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
- case ErrInvalidTransactionTimeout:
- return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
- case ErrConcurrentTransactions:
- return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
- case ErrTransactionCoordinatorFenced:
- return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
- case ErrTransactionalIDAuthorizationFailed:
- return "kafka server: Transactional ID authorization failed."
- case ErrSecurityDisabled:
- return "kafka server: Security features are disabled."
- case ErrOperationNotAttempted:
- return "kafka server: The broker did not attempt to execute this operation."
- case ErrKafkaStorageError:
- return "kafka server: Disk error when trying to access log file on the disk."
- case ErrLogDirNotFound:
- return "kafka server: The specified log directory is not found in the broker config."
- case ErrSASLAuthenticationFailed:
- return "kafka server: SASL Authentication failed."
- case ErrUnknownProducerID:
- return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
- case ErrReassignmentInProgress:
- return "kafka server: A partition reassignment is in progress."
- case ErrDelegationTokenAuthDisabled:
- return "kafka server: Delegation Token feature is not enabled."
- case ErrDelegationTokenNotFound:
- return "kafka server: Delegation Token is not found on server."
- case ErrDelegationTokenOwnerMismatch:
- return "kafka server: Specified Principal is not valid Owner/Renewer."
- case ErrDelegationTokenRequestNotAllowed:
- return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
- case ErrDelegationTokenAuthorizationFailed:
- return "kafka server: Delegation Token authorization failed."
- case ErrDelegationTokenExpired:
- return "kafka server: Delegation Token is expired."
- case ErrInvalidPrincipalType:
- return "kafka server: Supplied principalType is not supported."
- case ErrNonEmptyGroup:
- return "kafka server: The group is not empty."
- case ErrGroupIDNotFound:
- return "kafka server: The group id does not exist."
- case ErrFetchSessionIDNotFound:
- return "kafka server: The fetch session ID was not found."
- case ErrInvalidFetchSessionEpoch:
- return "kafka server: The fetch session epoch is invalid."
- case ErrListenerNotFound:
- return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
- case ErrTopicDeletionDisabled:
- return "kafka server: Topic deletion is disabled."
- case ErrFencedLeaderEpoch:
- return "kafka server: The leader epoch in the request is older than the epoch on the broker."
- case ErrUnknownLeaderEpoch:
- return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
- case ErrUnsupportedCompressionType:
- return "kafka server: The requesting client does not support the compression type of given partition."
- case ErrStaleBrokerEpoch:
- return "kafka server: Broker epoch has changed"
- case ErrOffsetNotAvailable:
- return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
- case ErrMemberIdRequired:
- return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
- case ErrPreferredLeaderNotAvailable:
- return "kafka server: The preferred leader was not available"
- case ErrGroupMaxSizeReached:
- return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
- }
-
- return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go b/vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go
deleted file mode 100644
index 4db9ddd..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/fetch_request.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package sarama
-
-type fetchRequestBlock struct {
- fetchOffset int64
- maxBytes int32
-}
-
-func (b *fetchRequestBlock) encode(pe packetEncoder) error {
- pe.putInt64(b.fetchOffset)
- pe.putInt32(b.maxBytes)
- return nil
-}
-
-func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
- if b.fetchOffset, err = pd.getInt64(); err != nil {
- return err
- }
- if b.maxBytes, err = pd.getInt32(); err != nil {
- return err
- }
- return nil
-}
-
-// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
-// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
-type FetchRequest struct {
- MaxWaitTime int32
- MinBytes int32
- MaxBytes int32
- Version int16
- Isolation IsolationLevel
- blocks map[string]map[int32]*fetchRequestBlock
-}
-
-type IsolationLevel int8
-
-const (
- ReadUncommitted IsolationLevel = iota
- ReadCommitted
-)
-
-func (r *FetchRequest) encode(pe packetEncoder) (err error) {
- pe.putInt32(-1) // replica ID is always -1 for clients
- pe.putInt32(r.MaxWaitTime)
- pe.putInt32(r.MinBytes)
- if r.Version >= 3 {
- pe.putInt32(r.MaxBytes)
- }
- if r.Version >= 4 {
- pe.putInt8(int8(r.Isolation))
- }
- err = pe.putArrayLength(len(r.blocks))
- if err != nil {
- return err
- }
- for topic, blocks := range r.blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(blocks))
- if err != nil {
- return err
- }
- for partition, block := range blocks {
- pe.putInt32(partition)
- err = block.encode(pe)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- if _, err = pd.getInt32(); err != nil {
- return err
- }
- if r.MaxWaitTime, err = pd.getInt32(); err != nil {
- return err
- }
- if r.MinBytes, err = pd.getInt32(); err != nil {
- return err
- }
- if r.Version >= 3 {
- if r.MaxBytes, err = pd.getInt32(); err != nil {
- return err
- }
- }
- if r.Version >= 4 {
- isolation, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Isolation = IsolationLevel(isolation)
- }
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*fetchRequestBlock)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*fetchRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- fetchBlock := &fetchRequestBlock{}
- if err = fetchBlock.decode(pd); err != nil {
- return err
- }
- r.blocks[topic][partition] = fetchBlock
- }
- }
- return nil
-}
-
-func (r *FetchRequest) key() int16 {
- return 1
-}
-
-func (r *FetchRequest) version() int16 {
- return r.Version
-}
-
-func (r *FetchRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- case 3:
- return V0_10_1_0
- case 4:
- return V0_11_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*fetchRequestBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*fetchRequestBlock)
- }
-
- tmp := new(fetchRequestBlock)
- tmp.maxBytes = maxBytes
- tmp.fetchOffset = fetchOffset
-
- r.blocks[topic][partitionID] = tmp
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go b/vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go
deleted file mode 100644
index 3afc187..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/fetch_response.go
+++ /dev/null
@@ -1,489 +0,0 @@
-package sarama
-
-import (
- "sort"
- "time"
-)
-
-type AbortedTransaction struct {
- ProducerID int64
- FirstOffset int64
-}
-
-func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
- if t.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
-
- if t.FirstOffset, err = pd.getInt64(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
- pe.putInt64(t.ProducerID)
- pe.putInt64(t.FirstOffset)
-
- return nil
-}
-
-type FetchResponseBlock struct {
- Err KError
- HighWaterMarkOffset int64
- LastStableOffset int64
- AbortedTransactions []*AbortedTransaction
- Records *Records // deprecated: use FetchResponseBlock.RecordsSet
- RecordsSet []*Records
- Partial bool
-}
-
-func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- b.HighWaterMarkOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- if version >= 4 {
- b.LastStableOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- numTransact, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if numTransact >= 0 {
- b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
- }
-
- for i := 0; i < numTransact; i++ {
- transact := new(AbortedTransaction)
- if err = transact.decode(pd); err != nil {
- return err
- }
- b.AbortedTransactions[i] = transact
- }
- }
-
- recordsSize, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- recordsDecoder, err := pd.getSubset(int(recordsSize))
- if err != nil {
- return err
- }
-
- b.RecordsSet = []*Records{}
-
- for recordsDecoder.remaining() > 0 {
- records := &Records{}
- if err := records.decode(recordsDecoder); err != nil {
- // If we have at least one decoded records, this is not an error
- if err == ErrInsufficientData {
- if len(b.RecordsSet) == 0 {
- b.Partial = true
- }
- break
- }
- return err
- }
-
- partial, err := records.isPartial()
- if err != nil {
- return err
- }
-
- n, err := records.numRecords()
- if err != nil {
- return err
- }
-
- if n > 0 || (partial && len(b.RecordsSet) == 0) {
- b.RecordsSet = append(b.RecordsSet, records)
-
- if b.Records == nil {
- b.Records = records
- }
- }
-
- overflow, err := records.isOverflow()
- if err != nil {
- return err
- }
-
- if partial || overflow {
- break
- }
- }
-
- return nil
-}
-
-func (b *FetchResponseBlock) numRecords() (int, error) {
- sum := 0
-
- for _, records := range b.RecordsSet {
- count, err := records.numRecords()
- if err != nil {
- return 0, err
- }
-
- sum += count
- }
-
- return sum, nil
-}
-
-func (b *FetchResponseBlock) isPartial() (bool, error) {
- if b.Partial {
- return true, nil
- }
-
- if len(b.RecordsSet) == 1 {
- return b.RecordsSet[0].isPartial()
- }
-
- return false, nil
-}
-
-func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(b.Err))
-
- pe.putInt64(b.HighWaterMarkOffset)
-
- if version >= 4 {
- pe.putInt64(b.LastStableOffset)
-
- if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
- return err
- }
- for _, transact := range b.AbortedTransactions {
- if err = transact.encode(pe); err != nil {
- return err
- }
- }
- }
-
- pe.push(&lengthField{})
- for _, records := range b.RecordsSet {
- err = records.encode(pe)
- if err != nil {
- return err
- }
- }
- return pe.pop()
-}
-
-func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
- // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered
- // plus Java implementation use a PriorityQueue based on `FirstOffset`. I guess we have to order it ourself
- at := b.AbortedTransactions
- sort.Slice(
- at,
- func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
- )
- return at
-}
-
-type FetchResponse struct {
- Blocks map[string]map[int32]*FetchResponseBlock
- ThrottleTime time.Duration
- Version int16 // v1 requires 0.9+, v2 requires 0.10+
- LogAppendTime bool
- Timestamp time.Time
-}
-
-func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.Version >= 1 {
- throttle, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttle) * time.Millisecond
- }
-
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(FetchResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- return nil
-}
-
-func (r *FetchResponse) encode(pe packetEncoder) (err error) {
- if r.Version >= 1 {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- }
-
- err = pe.putArrayLength(len(r.Blocks))
- if err != nil {
- return err
- }
-
- for topic, partitions := range r.Blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
-
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
-
- for id, block := range partitions {
- pe.putInt32(id)
- err = block.encode(pe, r.Version)
- if err != nil {
- return err
- }
- }
-
- }
- return nil
-}
-
-func (r *FetchResponse) key() int16 {
- return 1
-}
-
-func (r *FetchResponse) version() int16 {
- return r.Version
-}
-
-func (r *FetchResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- case 3:
- return V0_10_1_0
- case 4:
- return V0_11_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
- }
- partitions, ok := r.Blocks[topic]
- if !ok {
- partitions = make(map[int32]*FetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- frb, ok := partitions[partition]
- if !ok {
- frb = new(FetchResponseBlock)
- partitions[partition] = frb
- }
- frb.Err = err
-}
-
-func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
- }
- partitions, ok := r.Blocks[topic]
- if !ok {
- partitions = make(map[int32]*FetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- frb, ok := partitions[partition]
- if !ok {
- frb = new(FetchResponseBlock)
- partitions[partition] = frb
- }
-
- return frb
-}
-
-func encodeKV(key, value Encoder) ([]byte, []byte) {
- var kb []byte
- var vb []byte
- if key != nil {
- kb, _ = key.Encode()
- }
- if value != nil {
- vb, _ = value.Encode()
- }
-
- return kb, vb
-}
-
-func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
- frb := r.getOrCreateBlock(topic, partition)
- kb, vb := encodeKV(key, value)
- if r.LogAppendTime {
- timestamp = r.Timestamp
- }
- msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
- msgBlock := &MessageBlock{Msg: msg, Offset: offset}
- if len(frb.RecordsSet) == 0 {
- records := newLegacyRecords(&MessageSet{})
- frb.RecordsSet = []*Records{&records}
- }
- set := frb.RecordsSet[0].MsgSet
- set.Messages = append(set.Messages, msgBlock)
-}
-
-func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
- frb := r.getOrCreateBlock(topic, partition)
- kb, vb := encodeKV(key, value)
- if len(frb.RecordsSet) == 0 {
- records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
- frb.RecordsSet = []*Records{&records}
- }
- batch := frb.RecordsSet[0].RecordBatch
- rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
- batch.addRecord(rec)
-}
-
-// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp
-// But instead of appending 1 record to a batch, it append a new batch containing 1 record to the fetchResponse
-// Since transaction are handled on batch level (the whole batch is either committed or aborted), use this to test transactions
-func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
- frb := r.getOrCreateBlock(topic, partition)
- kb, vb := encodeKV(key, value)
-
- records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
- batch := &RecordBatch{
- Version: 2,
- LogAppendTime: r.LogAppendTime,
- FirstTimestamp: timestamp,
- MaxTimestamp: r.Timestamp,
- FirstOffset: offset,
- LastOffsetDelta: 0,
- ProducerID: producerID,
- IsTransactional: isTransactional,
- }
- rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
- batch.addRecord(rec)
- records.RecordBatch = batch
-
- frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
- frb := r.getOrCreateBlock(topic, partition)
-
- // batch
- batch := &RecordBatch{
- Version: 2,
- LogAppendTime: r.LogAppendTime,
- FirstTimestamp: timestamp,
- MaxTimestamp: r.Timestamp,
- FirstOffset: offset,
- LastOffsetDelta: 0,
- ProducerID: producerID,
- IsTransactional: true,
- Control: true,
- }
-
- // records
- records := newDefaultRecords(nil)
- records.RecordBatch = batch
-
- // record
- crAbort := ControlRecord{
- Version: 0,
- Type: recordType,
- }
- crKey := &realEncoder{raw: make([]byte, 4)}
- crValue := &realEncoder{raw: make([]byte, 6)}
- crAbort.encode(crKey, crValue)
- rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
- batch.addRecord(rec)
-
- frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
- r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
-}
-
-func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
- r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
-}
-
-func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
- r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
-}
-
-func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
- // define controlRecord key and value
- r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
-}
-
-func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
- frb := r.getOrCreateBlock(topic, partition)
- if len(frb.RecordsSet) == 0 {
- records := newDefaultRecords(&RecordBatch{Version: 2})
- frb.RecordsSet = []*Records{&records}
- }
- batch := frb.RecordsSet[0].RecordBatch
- batch.LastOffsetDelta = offset
-}
-
-func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
- frb := r.getOrCreateBlock(topic, partition)
- frb.LastStableOffset = offset
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/find_coordinator_request.go b/vendor/gopkg.in/Shopify/sarama.v1/find_coordinator_request.go
deleted file mode 100644
index ff2ad20..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/find_coordinator_request.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package sarama
-
-type CoordinatorType int8
-
-const (
- CoordinatorGroup CoordinatorType = iota
- CoordinatorTransaction
-)
-
-type FindCoordinatorRequest struct {
- Version int16
- CoordinatorKey string
- CoordinatorType CoordinatorType
-}
-
-func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
- if err := pe.putString(f.CoordinatorKey); err != nil {
- return err
- }
-
- if f.Version >= 1 {
- pe.putInt8(int8(f.CoordinatorType))
- }
-
- return nil
-}
-
-func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
- if f.CoordinatorKey, err = pd.getString(); err != nil {
- return err
- }
-
- if version >= 1 {
- f.Version = version
- coordinatorType, err := pd.getInt8()
- if err != nil {
- return err
- }
-
- f.CoordinatorType = CoordinatorType(coordinatorType)
- }
-
- return nil
-}
-
-func (f *FindCoordinatorRequest) key() int16 {
- return 10
-}
-
-func (f *FindCoordinatorRequest) version() int16 {
- return f.Version
-}
-
-func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
- switch f.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_8_2_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/find_coordinator_response.go b/vendor/gopkg.in/Shopify/sarama.v1/find_coordinator_response.go
deleted file mode 100644
index 9c900e8..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/find_coordinator_response.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-var NoNode = &Broker{id: -1, addr: ":-1"}
-
-type FindCoordinatorResponse struct {
- Version int16
- ThrottleTime time.Duration
- Err KError
- ErrMsg *string
- Coordinator *Broker
-}
-
-func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
- if version >= 1 {
- f.Version = version
-
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
- }
-
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- f.Err = KError(tmp)
-
- if version >= 1 {
- if f.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
- }
-
- coordinator := new(Broker)
- // The version is hardcoded to 0, as version 1 of the Broker-decode
- // contains the rack-field which is not present in the FindCoordinatorResponse.
- if err := coordinator.decode(pd, 0); err != nil {
- return err
- }
- if coordinator.addr == ":0" {
- return nil
- }
- f.Coordinator = coordinator
-
- return nil
-}
-
-func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
- if f.Version >= 1 {
- pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
- }
-
- pe.putInt16(int16(f.Err))
-
- if f.Version >= 1 {
- if err := pe.putNullableString(f.ErrMsg); err != nil {
- return err
- }
- }
-
- coordinator := f.Coordinator
- if coordinator == nil {
- coordinator = NoNode
- }
- if err := coordinator.encode(pe, 0); err != nil {
- return err
- }
- return nil
-}
-
-func (f *FindCoordinatorResponse) key() int16 {
- return 10
-}
-
-func (f *FindCoordinatorResponse) version() int16 {
- return f.Version
-}
-
-func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
- switch f.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_8_2_0
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/go.mod b/vendor/gopkg.in/Shopify/sarama.v1/go.mod
deleted file mode 100644
index 8c45155..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/go.mod
+++ /dev/null
@@ -1,24 +0,0 @@
-module github.com/Shopify/sarama
-
-require (
- github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798
- github.com/Shopify/toxiproxy v2.1.4+incompatible
- github.com/davecgh/go-spew v1.1.1
- github.com/eapache/go-resiliency v1.1.0
- github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
- github.com/eapache/queue v1.1.0
- github.com/golang/snappy v0.0.1 // indirect
- github.com/hashicorp/go-uuid v1.0.1 // indirect
- github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03
- github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41
- github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a
- github.com/stretchr/testify v1.3.0
- github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
- github.com/xdg/stringprep v1.0.0 // indirect
- golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 // indirect
- golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
- gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect
- gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect
- gopkg.in/jcmturner/gokrb5.v7 v7.2.3
- gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect
-)
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/go.sum b/vendor/gopkg.in/Shopify/sarama.v1/go.sum
deleted file mode 100644
index 4dbc6d2..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/go.sum
+++ /dev/null
@@ -1,51 +0,0 @@
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798 h1:2T/jmrHeTezcCM58lvEQXs0UpQJCo5SoGAcg+mbSTIg=
-github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 h1:GeinFsrjWz97fAxVUEd748aV0cYL+I6k44gFJTCVvpU=
-github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
-golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=
-gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
-gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
-gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/gssapi_kerberos.go b/vendor/gopkg.in/Shopify/sarama.v1/gssapi_kerberos.go
deleted file mode 100644
index 49b632d..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/gssapi_kerberos.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "fmt"
- "github.com/jcmturner/gofork/encoding/asn1"
- "gopkg.in/jcmturner/gokrb5.v7/asn1tools"
- "gopkg.in/jcmturner/gokrb5.v7/gssapi"
- "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
- "gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
- "gopkg.in/jcmturner/gokrb5.v7/messages"
- "gopkg.in/jcmturner/gokrb5.v7/types"
- "io"
- "strings"
- "time"
-)
-
-const (
- TOK_ID_KRB_AP_REQ = 256
- GSS_API_GENERIC_TAG = 0x60
- KRB5_USER_AUTH = 1
- KRB5_KEYTAB_AUTH = 2
- GSS_API_INITIAL = 1
- GSS_API_VERIFY = 2
- GSS_API_FINISH = 3
-)
-
-type GSSAPIConfig struct {
- AuthType int
- KeyTabPath string
- KerberosConfigPath string
- ServiceName string
- Username string
- Password string
- Realm string
-}
-
-type GSSAPIKerberosAuth struct {
- Config *GSSAPIConfig
- ticket messages.Ticket
- encKey types.EncryptionKey
- NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
- step int
-}
-
-type KerberosClient interface {
- Login() error
- GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
- Domain() string
- CName() types.PrincipalName
- Destroy()
-}
-
-/*
-*
-* Appends length in big endian before payload, and send it to kafka
-*
- */
-
-func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
- length := len(payload)
- finalPackage := make([]byte, length+4) //4 byte length header + payload
- copy(finalPackage[4:], payload)
- binary.BigEndian.PutUint32(finalPackage, uint32(length))
- bytes, err := broker.conn.Write(finalPackage)
- if err != nil {
- return bytes, err
- }
- return bytes, nil
-}
-
-/*
-*
-* Read length (4 bytes) and then read the payload
-*
- */
-
-func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
- bytesRead := 0
- lengthInBytes := make([]byte, 4)
- bytes, err := io.ReadFull(broker.conn, lengthInBytes)
- if err != nil {
- return nil, bytesRead, err
- }
- bytesRead += bytes
- payloadLength := binary.BigEndian.Uint32(lengthInBytes)
- payloadBytes := make([]byte, payloadLength) // buffer for read..
- bytes, err = io.ReadFull(broker.conn, payloadBytes) // read bytes
- if err != nil {
- return payloadBytes, bytesRead, err
- }
- bytesRead += bytes
- return payloadBytes, bytesRead, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
- a := make([]byte, 24)
- flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
- binary.LittleEndian.PutUint32(a[:4], 16)
- for _, i := range flags {
- f := binary.LittleEndian.Uint32(a[20:24])
- f |= uint32(i)
- binary.LittleEndian.PutUint32(a[20:24], f)
- }
- return a
-}
-
-/*
-*
-* Construct Kerberos AP_REQ package, conforming to RFC-4120
-* https://tools.ietf.org/html/rfc4120#page-84
-*
- */
-func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
- domain string, cname types.PrincipalName,
- ticket messages.Ticket,
- sessionKey types.EncryptionKey) ([]byte, error) {
- auth, err := types.NewAuthenticator(domain, cname)
- if err != nil {
- return nil, err
- }
- auth.Cksum = types.Checksum{
- CksumType: chksumtype.GSSAPI,
- Checksum: krbAuth.newAuthenticatorChecksum(),
- }
- APReq, err := messages.NewAPReq(
- ticket,
- sessionKey,
- auth,
- )
- if err != nil {
- return nil, err
- }
- aprBytes := make([]byte, 2)
- binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
- tb, err := APReq.Marshal()
- if err != nil {
- return nil, err
- }
- aprBytes = append(aprBytes, tb...)
- return aprBytes, nil
-}
-
-/*
-*
-* Append the GSS-API header to the payload, conforming to RFC-2743
-* Section 3.1, Mechanism-Independent Token Format
-*
-* https://tools.ietf.org/html/rfc2743#page-81
-*
-* GSSAPIHeader + <specific mechanism payload>
-*
- */
-func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
- oidBytes, err := asn1.Marshal(gssapi.OID(gssapi.OIDKRB5))
- if err != nil {
- return nil, err
- }
- tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
- GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
- GSSHeader = append(GSSHeader, oidBytes...)
- GSSPackage := append(GSSHeader, payload...)
- return GSSPackage, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
- switch krbAuth.step {
- case GSS_API_INITIAL:
- aprBytes, err := krbAuth.createKrb5Token(
- kerberosClient.Domain(),
- kerberosClient.CName(),
- krbAuth.ticket,
- krbAuth.encKey)
- if err != nil {
- return nil, err
- }
- krbAuth.step = GSS_API_VERIFY
- return krbAuth.appendGSSAPIHeader(aprBytes)
- case GSS_API_VERIFY:
- wrapTokenReq := gssapi.WrapToken{}
- if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
- return nil, err
- }
- // Validate response.
- isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
- if !isValid {
- return nil, err
- }
-
- wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
- if err != nil {
- return nil, err
- }
- krbAuth.step = GSS_API_FINISH
- return wrapTokenResponse.Marshal()
- }
- return nil, nil
-}
-
-/* This does the handshake for authorization */
-func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
-
- kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
- if err != nil {
- Logger.Printf("Kerberos client error: %s", err)
- return err
- }
-
- err = kerberosClient.Login()
- if err != nil {
- Logger.Printf("Kerberos client error: %s", err)
- return err
- }
- // Construct SPN using serviceName and host
- // SPN format: <SERVICE>/<FQDN>
-
- host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
- spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
-
- ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
-
- if err != nil {
- Logger.Printf("Error getting Kerberos service ticket : %s", err)
- return err
- }
- krbAuth.ticket = ticket
- krbAuth.encKey = encKey
- krbAuth.step = GSS_API_INITIAL
- var receivedBytes []byte = nil
- defer kerberosClient.Destroy()
- for {
- packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
- if err != nil {
- Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
- return err
- }
- requestTime := time.Now()
- bytesWritten, err := krbAuth.writePackage(broker, packBytes)
- if err != nil {
- Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
- return err
- }
- broker.updateOutgoingCommunicationMetrics(bytesWritten)
- if krbAuth.step == GSS_API_VERIFY {
- var bytesRead = 0
- receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
- requestLatency := time.Since(requestTime)
- broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
- if err != nil {
- Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
- return err
- }
- } else if krbAuth.step == GSS_API_FINISH {
- return nil
- }
- }
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go b/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go
deleted file mode 100644
index ce49c47..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package sarama
-
-type HeartbeatRequest struct {
- GroupId string
- GenerationId int32
- MemberId string
-}
-
-func (r *HeartbeatRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
-
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- return nil
-}
-
-func (r *HeartbeatRequest) key() int16 {
- return 12
-}
-
-func (r *HeartbeatRequest) version() int16 {
- return 0
-}
-
-func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go b/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go
deleted file mode 100644
index 766f5fd..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/heartbeat_response.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package sarama
-
-type HeartbeatResponse struct {
- Err KError
-}
-
-func (r *HeartbeatResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return nil
-}
-
-func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Err = KError(kerr)
-
- return nil
-}
-
-func (r *HeartbeatResponse) key() int16 {
- return 12
-}
-
-func (r *HeartbeatResponse) version() int16 {
- return 0
-}
-
-func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/init_producer_id_request.go b/vendor/gopkg.in/Shopify/sarama.v1/init_producer_id_request.go
deleted file mode 100644
index 8ceb6c2..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/init_producer_id_request.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDRequest struct {
- TransactionalID *string
- TransactionTimeout time.Duration
-}
-
-func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
- if err := pe.putNullableString(i.TransactionalID); err != nil {
- return err
- }
- pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
-
- return nil
-}
-
-func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
- if i.TransactionalID, err = pd.getNullableString(); err != nil {
- return err
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
-
- return nil
-}
-
-func (i *InitProducerIDRequest) key() int16 {
- return 22
-}
-
-func (i *InitProducerIDRequest) version() int16 {
- return 0
-}
-
-func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/init_producer_id_response.go b/vendor/gopkg.in/Shopify/sarama.v1/init_producer_id_response.go
deleted file mode 100644
index 1b32eb0..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/init_producer_id_response.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDResponse struct {
- ThrottleTime time.Duration
- Err KError
- ProducerID int64
- ProducerEpoch int16
-}
-
-func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(i.Err))
- pe.putInt64(i.ProducerID)
- pe.putInt16(i.ProducerEpoch)
-
- return nil
-}
-
-func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- i.Err = KError(kerr)
-
- if i.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
-
- if i.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (i *InitProducerIDResponse) key() int16 {
- return 22
-}
-
-func (i *InitProducerIDResponse) version() int16 {
- return 0
-}
-
-func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go b/vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go
deleted file mode 100644
index 97e9299..0000000
--- a/vendor/gopkg.in/Shopify/sarama.v1/join_group_request.go
+++ /dev/null
@@ -1,16