blob: 3afc18778953f0ace38d8333cb311743ca06910b [file] [log] [blame]
Scott Bakered4efab2020-01-13 19:12:25 -08001package sarama
2
3import (
4 "sort"
5 "time"
6)
7
// AbortedTransaction mirrors one entry of the aborted_transactions array in a
// fetch response (v4+), identifying a producer whose transaction was aborted.
type AbortedTransaction struct {
	// ProducerID is the id of the producer that owned the aborted transaction.
	ProducerID int64
	// FirstOffset is the first offset in the partition belonging to the
	// aborted transaction.
	FirstOffset int64
}
12
13func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
14 if t.ProducerID, err = pd.getInt64(); err != nil {
15 return err
16 }
17
18 if t.FirstOffset, err = pd.getInt64(); err != nil {
19 return err
20 }
21
22 return nil
23}
24
25func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
26 pe.putInt64(t.ProducerID)
27 pe.putInt64(t.FirstOffset)
28
29 return nil
30}
31
// FetchResponseBlock holds the per-partition payload of a fetch response:
// the error code, offset watermarks, aborted transactions (v4+) and the
// fetched record data.
type FetchResponseBlock struct {
	// Err is the partition-level error code returned by the broker.
	Err                 KError
	HighWaterMarkOffset int64
	// LastStableOffset is only populated for response version >= 4.
	LastStableOffset    int64
	// AbortedTransactions is only populated for response version >= 4.
	AbortedTransactions []*AbortedTransaction
	// Records points at the first entry of RecordsSet, when any.
	//
	// Deprecated: use FetchResponseBlock.RecordsSet.
	Records    *Records
	RecordsSet []*Records
	// Partial is set when the broker returned a truncated record set that
	// contained no complete records (see decode).
	Partial bool
}
41
// decode parses one partition block of a fetch response. For version >= 4 it
// additionally reads the last stable offset and the aborted-transactions
// array. The record payload is a length-prefixed byte span that may contain
// several record sets and may be truncated by the broker, so decoding is
// best-effort: a trailing incomplete set is tolerated rather than treated as
// an error.
func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
	tmp, err := pd.getInt16()
	if err != nil {
		return err
	}
	b.Err = KError(tmp)

	b.HighWaterMarkOffset, err = pd.getInt64()
	if err != nil {
		return err
	}

	if version >= 4 {
		b.LastStableOffset, err = pd.getInt64()
		if err != nil {
			return err
		}

		// A negative array length denotes a null array on the wire; only
		// allocate for a non-null (possibly empty) array.
		numTransact, err := pd.getArrayLength()
		if err != nil {
			return err
		}

		if numTransact >= 0 {
			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
		}

		for i := 0; i < numTransact; i++ {
			transact := new(AbortedTransaction)
			if err = transact.decode(pd); err != nil {
				return err
			}
			b.AbortedTransactions[i] = transact
		}
	}

	// The record data is prefixed with its total size in bytes; restrict
	// further decoding to exactly that span.
	recordsSize, err := pd.getInt32()
	if err != nil {
		return err
	}

	recordsDecoder, err := pd.getSubset(int(recordsSize))
	if err != nil {
		return err
	}

	b.RecordsSet = []*Records{}

	for recordsDecoder.remaining() > 0 {
		records := &Records{}
		if err := records.decode(recordsDecoder); err != nil {
			// If we have at least one decoded records, this is not an error
			if err == ErrInsufficientData {
				// Nothing decoded at all: surface the truncation to the
				// consumer via Partial so it can ask for a larger fetch.
				if len(b.RecordsSet) == 0 {
					b.Partial = true
				}
				break
			}
			return err
		}

		partial, err := records.isPartial()
		if err != nil {
			return err
		}

		n, err := records.numRecords()
		if err != nil {
			return err
		}

		// Keep sets that carry records; also keep a record-less partial set,
		// but only when it is the first one, so the caller can detect it.
		if n > 0 || (partial && len(b.RecordsSet) == 0) {
			b.RecordsSet = append(b.RecordsSet, records)

			// Maintain the deprecated Records alias to the first set.
			if b.Records == nil {
				b.Records = records
			}
		}

		overflow, err := records.isOverflow()
		if err != nil {
			return err
		}

		// A partial or overflowing set is necessarily the last usable data
		// in this span; stop decoding.
		if partial || overflow {
			break
		}
	}

	return nil
}
133
134func (b *FetchResponseBlock) numRecords() (int, error) {
135 sum := 0
136
137 for _, records := range b.RecordsSet {
138 count, err := records.numRecords()
139 if err != nil {
140 return 0, err
141 }
142
143 sum += count
144 }
145
146 return sum, nil
147}
148
149func (b *FetchResponseBlock) isPartial() (bool, error) {
150 if b.Partial {
151 return true, nil
152 }
153
154 if len(b.RecordsSet) == 1 {
155 return b.RecordsSet[0].isPartial()
156 }
157
158 return false, nil
159}
160
161func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
162 pe.putInt16(int16(b.Err))
163
164 pe.putInt64(b.HighWaterMarkOffset)
165
166 if version >= 4 {
167 pe.putInt64(b.LastStableOffset)
168
169 if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
170 return err
171 }
172 for _, transact := range b.AbortedTransactions {
173 if err = transact.encode(pe); err != nil {
174 return err
175 }
176 }
177 }
178
179 pe.push(&lengthField{})
180 for _, records := range b.RecordsSet {
181 err = records.encode(pe)
182 if err != nil {
183 return err
184 }
185 }
186 return pe.pop()
187}
188
189func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
190 // I can't find any doc that guarantee the field `fetchResponse.AbortedTransactions` is ordered
191 // plus Java implementation use a PriorityQueue based on `FirstOffset`. I guess we have to order it ourself
192 at := b.AbortedTransactions
193 sort.Slice(
194 at,
195 func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
196 )
197 return at
198}
199
// FetchResponse models a Kafka fetch response across protocol versions 0-4,
// keyed by topic and then partition.
type FetchResponse struct {
	Blocks map[string]map[int32]*FetchResponseBlock
	// ThrottleTime is only present for version >= 1.
	ThrottleTime time.Duration
	Version      int16 // v1 requires 0.9+, v2 requires 0.10+
	// LogAppendTime and Timestamp are used by the Add*WithTimestamp helpers
	// when fabricating responses (e.g. in mocks/tests).
	LogAppendTime bool
	Timestamp     time.Time
}
207
208func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
209 r.Version = version
210
211 if r.Version >= 1 {
212 throttle, err := pd.getInt32()
213 if err != nil {
214 return err
215 }
216 r.ThrottleTime = time.Duration(throttle) * time.Millisecond
217 }
218
219 numTopics, err := pd.getArrayLength()
220 if err != nil {
221 return err
222 }
223
224 r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
225 for i := 0; i < numTopics; i++ {
226 name, err := pd.getString()
227 if err != nil {
228 return err
229 }
230
231 numBlocks, err := pd.getArrayLength()
232 if err != nil {
233 return err
234 }
235
236 r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
237
238 for j := 0; j < numBlocks; j++ {
239 id, err := pd.getInt32()
240 if err != nil {
241 return err
242 }
243
244 block := new(FetchResponseBlock)
245 err = block.decode(pd, version)
246 if err != nil {
247 return err
248 }
249 r.Blocks[name][id] = block
250 }
251 }
252
253 return nil
254}
255
256func (r *FetchResponse) encode(pe packetEncoder) (err error) {
257 if r.Version >= 1 {
258 pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
259 }
260
261 err = pe.putArrayLength(len(r.Blocks))
262 if err != nil {
263 return err
264 }
265
266 for topic, partitions := range r.Blocks {
267 err = pe.putString(topic)
268 if err != nil {
269 return err
270 }
271
272 err = pe.putArrayLength(len(partitions))
273 if err != nil {
274 return err
275 }
276
277 for id, block := range partitions {
278 pe.putInt32(id)
279 err = block.encode(pe, r.Version)
280 if err != nil {
281 return err
282 }
283 }
284
285 }
286 return nil
287}
288
// key returns the Kafka API key for Fetch (1).
func (r *FetchResponse) key() int16 {
	return 1
}
292
// version returns the protocol version this response was built/decoded with.
func (r *FetchResponse) version() int16 {
	return r.Version
}
296
297func (r *FetchResponse) requiredVersion() KafkaVersion {
298 switch r.Version {
299 case 1:
300 return V0_9_0_0
301 case 2:
302 return V0_10_0_0
303 case 3:
304 return V0_10_1_0
305 case 4:
306 return V0_11_0_0
307 default:
308 return MinVersion
309 }
310}
311
312func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
313 if r.Blocks == nil {
314 return nil
315 }
316
317 if r.Blocks[topic] == nil {
318 return nil
319 }
320
321 return r.Blocks[topic][partition]
322}
323
324func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
325 if r.Blocks == nil {
326 r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
327 }
328 partitions, ok := r.Blocks[topic]
329 if !ok {
330 partitions = make(map[int32]*FetchResponseBlock)
331 r.Blocks[topic] = partitions
332 }
333 frb, ok := partitions[partition]
334 if !ok {
335 frb = new(FetchResponseBlock)
336 partitions[partition] = frb
337 }
338 frb.Err = err
339}
340
341func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
342 if r.Blocks == nil {
343 r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
344 }
345 partitions, ok := r.Blocks[topic]
346 if !ok {
347 partitions = make(map[int32]*FetchResponseBlock)
348 r.Blocks[topic] = partitions
349 }
350 frb, ok := partitions[partition]
351 if !ok {
352 frb = new(FetchResponseBlock)
353 partitions[partition] = frb
354 }
355
356 return frb
357}
358
359func encodeKV(key, value Encoder) ([]byte, []byte) {
360 var kb []byte
361 var vb []byte
362 if key != nil {
363 kb, _ = key.Encode()
364 }
365 if value != nil {
366 vb, _ = value.Encode()
367 }
368
369 return kb, vb
370}
371
// AddMessageWithTimestamp appends a legacy-format (MessageSet) message for the
// given topic/partition, creating the block and a legacy record set on first
// use. When r.LogAppendTime is set, the given timestamp is overridden with
// r.Timestamp, matching broker log-append-time semantics.
// version is the legacy Message version (0 or 1 — presumably; confirm against Message).
func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)
	if r.LogAppendTime {
		timestamp = r.Timestamp
	}
	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
	// Lazily create a single legacy record set; all messages are appended to
	// RecordsSet[0] — assumes it was created by this helper as a MsgSet.
	if len(frb.RecordsSet) == 0 {
		records := newLegacyRecords(&MessageSet{})
		frb.RecordsSet = []*Records{&records}
	}
	set := frb.RecordsSet[0].MsgSet
	set.Messages = append(set.Messages, msgBlock)
}
387
// AddRecordWithTimestamp appends one record (modern RecordBatch format, v2)
// for the given topic/partition. The first call creates the batch with
// FirstTimestamp set to the given timestamp; subsequent records store their
// timestamp as a delta from it. Note the record's OffsetDelta is set to the
// absolute offset — assumes the batch's FirstOffset stays 0 (TODO confirm
// against consumers of these fabricated responses).
func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
	frb := r.getOrCreateBlock(topic, partition)
	kb, vb := encodeKV(key, value)
	if len(frb.RecordsSet) == 0 {
		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
		frb.RecordsSet = []*Records{&records}
	}
	batch := frb.RecordsSet[0].RecordBatch
	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
	batch.addRecord(rec)
}
399
400// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp
401// But instead of appending 1 record to a batch, it append a new batch containing 1 record to the fetchResponse
402// Since transaction are handled on batch level (the whole batch is either committed or aborted), use this to test transactions
403func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
404 frb := r.getOrCreateBlock(topic, partition)
405 kb, vb := encodeKV(key, value)
406
407 records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
408 batch := &RecordBatch{
409 Version: 2,
410 LogAppendTime: r.LogAppendTime,
411 FirstTimestamp: timestamp,
412 MaxTimestamp: r.Timestamp,
413 FirstOffset: offset,
414 LastOffsetDelta: 0,
415 ProducerID: producerID,
416 IsTransactional: isTransactional,
417 }
418 rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
419 batch.addRecord(rec)
420 records.RecordBatch = batch
421
422 frb.RecordsSet = append(frb.RecordsSet, &records)
423}
424
// AddControlRecordWithTimestamp appends a new transactional control batch
// (Control: true) containing a single control record of the given type
// (commit/abort marker) for the given topic/partition. Used to fabricate
// transaction markers in tests.
func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
	frb := r.getOrCreateBlock(topic, partition)

	// batch: one transactional control batch per call
	batch := &RecordBatch{
		Version:         2,
		LogAppendTime:   r.LogAppendTime,
		FirstTimestamp:  timestamp,
		MaxTimestamp:    r.Timestamp,
		FirstOffset:     offset,
		LastOffsetDelta: 0,
		ProducerID:      producerID,
		IsTransactional: true,
		Control:         true,
	}

	// records wrapper around the batch
	records := newDefaultRecords(nil)
	records.RecordBatch = batch

	// record: encode the control key/value into fixed-size buffers.
	// NOTE(review): sizes 4 (key) and 6 (value) presumably match the control
	// record wire format ControlRecord.encode writes — confirm if that format
	// changes.
	crAbort := ControlRecord{
		Version: 0,
		Type:    recordType,
	}
	crKey := &realEncoder{raw: make([]byte, 4)}
	crValue := &realEncoder{raw: make([]byte, 6)}
	crAbort.encode(crKey, crValue)
	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
	batch.addRecord(rec)

	frb.RecordsSet = append(frb.RecordsSet, &records)
}
458
// AddMessage appends a legacy-format message with a zero timestamp and
// message version 0; see AddMessageWithTimestamp.
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
}
462
// AddRecord appends a modern-format record with a zero timestamp; see
// AddRecordWithTimestamp.
func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
}
466
// AddRecordBatch appends a new single-record batch with a zero timestamp; see
// AddRecordBatchWithTimestamp.
func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
}
470
// AddControlRecord appends a transaction control batch with a zero timestamp;
// see AddControlRecordWithTimestamp.
func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
}
475
// SetLastOffsetDelta sets LastOffsetDelta on the first record batch of the
// given topic/partition, creating an empty v2 batch if none exists yet.
// Assumes RecordsSet[0] is batch-backed (RecordBatch non-nil) — true when the
// set was created by the Add*Record* helpers, not the legacy AddMessage path.
func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
	frb := r.getOrCreateBlock(topic, partition)
	if len(frb.RecordsSet) == 0 {
		records := newDefaultRecords(&RecordBatch{Version: 2})
		frb.RecordsSet = []*Records{&records}
	}
	batch := frb.RecordsSet[0].RecordBatch
	batch.LastOffsetDelta = offset
}
485
486func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
487 frb := r.getOrCreateBlock(topic, partition)
488 frb.LastStableOffset = offset
489}