package sarama

import (
	"math/rand"
	"sort"
	"sync"
	"time"
)

// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
// automatically when it passes out of scope. It is safe to share a client amongst many
// users, however Kafka will process requests from a single client strictly in serial,
// so it is generally more efficient to use the default one client per producer/consumer.
type Client interface {
	// Config returns the Config struct of the client. This struct should not be
	// altered after it has been created.
	Config() *Config

	// Controller returns the cluster controller broker. It will return a
	// locally cached value if it's available. You can call RefreshController
	// to update the cached value. Requires Kafka 0.10 or higher.
	Controller() (*Broker, error)

	// RefreshController retrieves the cluster controller from fresh metadata
	// and stores it in the local cache. Requires Kafka 0.10 or higher.
	RefreshController() (*Broker, error)

	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
	Brokers() []*Broker

	// Topics returns the set of available topics as retrieved from cluster metadata.
	Topics() ([]string, error)

	// Partitions returns the sorted list of all partition IDs for the given topic.
	Partitions(topic string) ([]int32, error)

	// WritablePartitions returns the sorted list of all writable partition IDs for
	// the given topic, where "writable" means "having a valid leader accepting
	// writes".
	WritablePartitions(topic string) ([]int32, error)

	// Leader returns the broker object that is the leader of the current
	// topic/partition, as determined by querying the cluster metadata.
	Leader(topic string, partitionID int32) (*Broker, error)

	// Replicas returns the set of all replica IDs for the given partition.
	Replicas(topic string, partitionID int32) ([]int32, error)

	// InSyncReplicas returns the set of all in-sync replica IDs for the given
	// partition. In-sync replicas are replicas which are fully caught up with
	// the partition leader.
	InSyncReplicas(topic string, partitionID int32) ([]int32, error)

	// OfflineReplicas returns the set of all offline replica IDs for the given
	// partition. Offline replicas are replicas hosted on brokers which are
	// currently offline.
	OfflineReplicas(topic string, partitionID int32) ([]int32, error)

	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
	// available metadata for those topics. If no topics are provided, it will refresh
	// metadata for all topics.
	RefreshMetadata(topics ...string) error

	// GetOffset queries the cluster to get the most recent available offset at the
	// given time (in milliseconds) on the topic/partition combination.
	// Time should be OffsetOldest for the earliest available offset,
	// OffsetNewest for the offset of the message that will be produced next, or a
	// timestamp in milliseconds since the Unix epoch.
	GetOffset(topic string, partitionID int32, time int64) (int64, error)

	// Coordinator returns the coordinating broker for a consumer group. It will
	// return a locally cached value if it's available. You can call
	// RefreshCoordinator to update the cached value. This function only works on
	// Kafka 0.8.2 and higher.
	Coordinator(consumerGroup string) (*Broker, error)

	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
	// in local cache. This function only works on Kafka 0.8.2 and higher.
	RefreshCoordinator(consumerGroup string) error

	// InitProducerID retrieves information required for Idempotent Producer
	InitProducerID() (*InitProducerIDResponse, error)

	// Close shuts down all broker connections managed by this client. It is required
	// to call this function before a client object passes out of scope, as it will
	// otherwise leak memory. You must close any Producers or Consumers using a client
	// before you close the client.
	Close() error

	// Closed returns true if the client has already had Close called on it.
	Closed() bool
}
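
// The sketch below illustrates typical Client usage; it is not part of the
// package API. The broker address and topic name are placeholders, and error
// handling is abbreviated for brevity:
//
//	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close() // required to avoid leaking broker connections
//
//	partitions, err := client.Partitions("my-topic")
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, p := range partitions {
//		leader, err := client.Leader("my-topic", p)
//		if err != nil {
//			log.Fatal(err)
//		}
//		log.Printf("partition %d is led by broker %d", p, leader.ID())
//	}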

const (
	// OffsetNewest stands for the log head offset, i.e. the offset that will be
	// assigned to the next message that will be produced to the partition. You
	// can send this to a client's GetOffset method to get this offset, or when
	// calling ConsumePartition to start consuming new messages.
	OffsetNewest int64 = -1
	// OffsetOldest stands for the oldest offset available on the broker for a
	// partition. You can send this to a client's GetOffset method to get this
	// offset, or when calling ConsumePartition to start consuming from the
	// oldest offset that is still available on the broker.
	OffsetOldest int64 = -2
)
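
// A minimal sketch of GetOffset usage with the offset constants above (the
// topic name and partition are placeholders; errors are ignored for brevity):
//
//	oldest, _ := client.GetOffset("my-topic", 0, sarama.OffsetOldest)
//	newest, _ := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
//	lag := newest - oldest // number of messages currently retained in partition 0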

type client struct {
	conf           *Config
	closer, closed chan none // for shutting down background metadata updater

	// the broker addresses given to us through the constructor are not guaranteed to be returned in
	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
	// so we store them separately
	seedBrokers []*Broker
	deadSeeds   []*Broker

	controllerID   int32                                   // cluster controller broker id
	brokers        map[int32]*Broker                       // maps broker ids to brokers
	metadata       map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
	metadataTopics map[string]none                         // topics that need to collect metadata
	coordinators   map[string]int32                        // maps consumer group names to coordinating broker IDs

	// If the number of partitions is large, we can get some churn calling cachedPartitions,
	// so the result is cached. It is important to update this value whenever metadata is changed.
	cachedPartitionsResults map[string][maxPartitionIndex][]int32

	lock sync.RWMutex // protects access to the maps that hold cluster state.
}

// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the Kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
	Logger.Println("Initializing new client")

	if conf == nil {
		conf = NewConfig()
	}

	if err := conf.Validate(); err != nil {
		return nil, err
	}

	if len(addrs) < 1 {
		return nil, ConfigurationError("You must provide at least one broker address")
	}

	client := &client{
		conf:                    conf,
		closer:                  make(chan none),
		closed:                  make(chan none),
		brokers:                 make(map[int32]*Broker),
		metadata:                make(map[string]map[int32]*PartitionMetadata),
		metadataTopics:          make(map[string]none),
		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
		coordinators:            make(map[string]int32),
	}

	random := rand.New(rand.NewSource(time.Now().UnixNano()))
	for _, index := range random.Perm(len(addrs)) {
		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
	}

	if conf.Metadata.Full {
		// do an initial fetch of all cluster metadata by specifying an empty list of topics
		err := client.RefreshMetadata()
		switch err {
		case nil:
			break
		case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
			Logger.Println(err)
		default:
			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
			_ = client.Close()
			return nil, err
		}
	}
	go withRecover(client.backgroundMetadataUpdater)

	Logger.Println("Successfully initialized new client")

	return client, nil
}
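
// A minimal sketch of constructing a client with a customised Config, using
// only options referenced elsewhere in this file (the values chosen are
// illustrative, not recommendations; the broker address is a placeholder):
//
//	conf := sarama.NewConfig()
//	conf.Metadata.Full = false                   // only fetch metadata for topics we actually use
//	conf.Metadata.Retry.Max = 5                  // retry metadata requests up to 5 times
//	conf.Metadata.Retry.Backoff = 500 * time.Millisecond
//	conf.Metadata.RefreshFrequency = time.Minute // background refresh interval (0 disables the updater)
//
//	client, err := sarama.NewClient([]string{"localhost:9092"}, conf)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer client.Close()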

func (client *client) Config() *Config {
	return client.conf
}

func (client *client) Brokers() []*Broker {
	client.lock.RLock()
	defer client.lock.RUnlock()
	brokers := make([]*Broker, 0, len(client.brokers))
	for _, broker := range client.brokers {
		brokers = append(brokers, broker)
	}
	return brokers
}

func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
	var err error
	for broker := client.any(); broker != nil; broker = client.any() {
		req := &InitProducerIDRequest{}

		// assign to the outer err (rather than shadowing it with :=) so the last
		// error seen is returned if we run out of brokers
		var response *InitProducerIDResponse
		response, err = broker.InitProducerID(req)
		switch err.(type) {
		case nil:
			return response, nil
		default:
			// some error, remove that broker and try again
			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
			_ = broker.Close()
			client.deregisterBroker(broker)
		}
	}
	return nil, err
}

func (client *client) Close() error {
	if client.Closed() {
		// Chances are this is being called from a defer() and the error will go unobserved
		// so we go ahead and log the event in this case.
		Logger.Printf("Close() called on already closed client")
		return ErrClosedClient
	}

	// shutdown and wait for the background thread before we take the lock, to avoid races
	close(client.closer)
	<-client.closed

	client.lock.Lock()
	defer client.lock.Unlock()
	Logger.Println("Closing Client")

	for _, broker := range client.brokers {
		safeAsyncClose(broker)
	}

	for _, broker := range client.seedBrokers {
		safeAsyncClose(broker)
	}

	client.brokers = nil
	client.metadata = nil
	client.metadataTopics = nil

	return nil
}

func (client *client) Closed() bool {
	client.lock.RLock()
	defer client.lock.RUnlock()

	return client.brokers == nil
}

func (client *client) Topics() ([]string, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	client.lock.RLock()
	defer client.lock.RUnlock()

	ret := make([]string, 0, len(client.metadata))
	for topic := range client.metadata {
		ret = append(ret, topic)
	}

	return ret, nil
}

func (client *client) MetadataTopics() ([]string, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	client.lock.RLock()
	defer client.lock.RUnlock()

	ret := make([]string, 0, len(client.metadataTopics))
	for topic := range client.metadataTopics {
		ret = append(ret, topic)
	}

	return ret, nil
}

func (client *client) Partitions(topic string) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	partitions := client.cachedPartitions(topic, allPartitions)

	if len(partitions) == 0 {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		partitions = client.cachedPartitions(topic, allPartitions)
	}

	// no partitions found even after refreshing metadata
	if len(partitions) == 0 {
		return nil, ErrUnknownTopicOrPartition
	}

	return partitions, nil
}

func (client *client) WritablePartitions(topic string) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	partitions := client.cachedPartitions(topic, writablePartitions)

	// len==0 catches both the nil case (no such topic) and the odd case where every single
	// partition is undergoing leader election simultaneously. Callers have to be able to handle
	// this function returning an empty slice (which is a valid return value), but catching it
	// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition)
	// triggers a metadata refresh as a nicety, so callers can simply try again and don't have to
	// trigger a refresh manually (otherwise they'd just keep getting a stale cached copy).
	if len(partitions) == 0 {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		partitions = client.cachedPartitions(topic, writablePartitions)
	}

	if partitions == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	return partitions, nil
}

func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	metadata := client.cachedMetadata(topic, partitionID)

	if metadata == nil {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		metadata = client.cachedMetadata(topic, partitionID)
	}

	if metadata == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	if metadata.Err == ErrReplicaNotAvailable {
		return dupInt32Slice(metadata.Replicas), metadata.Err
	}
	return dupInt32Slice(metadata.Replicas), nil
}

func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	metadata := client.cachedMetadata(topic, partitionID)

	if metadata == nil {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		metadata = client.cachedMetadata(topic, partitionID)
	}

	if metadata == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	if metadata.Err == ErrReplicaNotAvailable {
		return dupInt32Slice(metadata.Isr), metadata.Err
	}
	return dupInt32Slice(metadata.Isr), nil
}

func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	metadata := client.cachedMetadata(topic, partitionID)

	if metadata == nil {
		err := client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		metadata = client.cachedMetadata(topic, partitionID)
	}

	if metadata == nil {
		return nil, ErrUnknownTopicOrPartition
	}

	if metadata.Err == ErrReplicaNotAvailable {
		return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
	}
	return dupInt32Slice(metadata.OfflineReplicas), nil
}

func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	leader, err := client.cachedLeader(topic, partitionID)

	if leader == nil {
		err = client.RefreshMetadata(topic)
		if err != nil {
			return nil, err
		}
		leader, err = client.cachedLeader(topic, partitionID)
	}

	return leader, err
}

func (client *client) RefreshMetadata(topics ...string) error {
	if client.Closed() {
		return ErrClosedClient
	}

	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic and not return a proper
	// error. This handles the case by returning an error instead of sending it
	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
	for _, topic := range topics {
		if len(topic) == 0 {
			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
		}
	}

	deadline := time.Time{}
	if client.conf.Metadata.Timeout > 0 {
		deadline = time.Now().Add(client.conf.Metadata.Timeout)
	}
	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
}

func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
	if client.Closed() {
		return -1, ErrClosedClient
	}

	offset, err := client.getOffset(topic, partitionID, time)

	if err != nil {
		if err := client.RefreshMetadata(topic); err != nil {
			return -1, err
		}
		return client.getOffset(topic, partitionID, time)
	}

	return offset, err
}

func (client *client) Controller() (*Broker, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
		return nil, ErrUnsupportedVersion
	}

	controller := client.cachedController()
	if controller == nil {
		if err := client.refreshMetadata(); err != nil {
			return nil, err
		}
		controller = client.cachedController()
	}

	if controller == nil {
		return nil, ErrControllerNotAvailable
	}

	_ = controller.Open(client.conf)
	return controller, nil
}

// deregisterController removes the cached controller broker from the brokers map
// so that it will be looked up again on the next metadata refresh
func (client *client) deregisterController() {
	client.lock.Lock()
	defer client.lock.Unlock()
	delete(client.brokers, client.controllerID)
}

// RefreshController retrieves the cluster controller from fresh metadata
// and stores it in the local cache. Requires Kafka 0.10 or higher.
func (client *client) RefreshController() (*Broker, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	client.deregisterController()

	if err := client.refreshMetadata(); err != nil {
		return nil, err
	}

	controller := client.cachedController()
	if controller == nil {
		return nil, ErrControllerNotAvailable
	}

	_ = controller.Open(client.conf)
	return controller, nil
}

func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
	if client.Closed() {
		return nil, ErrClosedClient
	}

	coordinator := client.cachedCoordinator(consumerGroup)

	if coordinator == nil {
		if err := client.RefreshCoordinator(consumerGroup); err != nil {
			return nil, err
		}
		coordinator = client.cachedCoordinator(consumerGroup)
	}

	if coordinator == nil {
		return nil, ErrConsumerCoordinatorNotAvailable
	}

	_ = coordinator.Open(client.conf)
	return coordinator, nil
}
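
// A minimal sketch of looking up the group coordinator and explicitly refreshing
// the cached value when it may have gone stale (the group name is a placeholder):
//
//	coordinator, err := client.Coordinator("my-group")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("group coordinator is broker #%d at %s", coordinator.ID(), coordinator.Addr())
//
//	// if the coordinator later moves to another broker, refresh the cached value:
//	if err := client.RefreshCoordinator("my-group"); err != nil {
//		log.Fatal(err)
//	}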

func (client *client) RefreshCoordinator(consumerGroup string) error {
	if client.Closed() {
		return ErrClosedClient
	}

	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
	if err != nil {
		return err
	}

	client.lock.Lock()
	defer client.lock.Unlock()
	client.registerBroker(response.Coordinator)
	client.coordinators[consumerGroup] = response.Coordinator.ID()
	return nil
}

// private broker management helpers

// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID is already registered under a different address,
// the old instance is closed and replaced. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
	if client.brokers == nil {
		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
		return
	}

	if client.brokers[broker.ID()] == nil {
		client.brokers[broker.ID()] = broker
		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
		safeAsyncClose(client.brokers[broker.ID()])
		client.brokers[broker.ID()] = broker
		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
	}
}

// deregisterBroker moves the broker to the dead-seeds list if it is the current seed broker,
// and otherwise removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
	client.lock.Lock()
	defer client.lock.Unlock()

	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
		client.deadSeeds = append(client.deadSeeds, broker)
		client.seedBrokers = client.seedBrokers[1:]
	} else {
		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
		// but we really shouldn't have to; once that loop is made better this case can be
		// removed, and the function generally can be renamed from `deregisterBroker` to
		// `nextSeedBroker` or something
		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
		delete(client.brokers, broker.ID())
	}
}

func (client *client) resurrectDeadBrokers() {
	client.lock.Lock()
	defer client.lock.Unlock()

	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
	client.deadSeeds = nil
}

func (client *client) any() *Broker {
	client.lock.RLock()
	defer client.lock.RUnlock()

	if len(client.seedBrokers) > 0 {
		_ = client.seedBrokers[0].Open(client.conf)
		return client.seedBrokers[0]
	}

	// not guaranteed to be random *or* deterministic
	for _, broker := range client.brokers {
		_ = broker.Open(client.conf)
		return broker
	}

	return nil
}

// private caching/lazy metadata helpers

type partitionType int

const (
	allPartitions partitionType = iota
	writablePartitions
	// If you add any more types, update the partition cache in update()

	// Ensure this is the last partition type value
	maxPartitionIndex
)

func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
	client.lock.RLock()
	defer client.lock.RUnlock()

	partitions := client.metadata[topic]
	if partitions != nil {
		return partitions[partitionID]
	}

	return nil
}

func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
	client.lock.RLock()
	defer client.lock.RUnlock()

	partitions, exists := client.cachedPartitionsResults[topic]

	if !exists {
		return nil
	}
	return partitions[partitionSet]
}

func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
	partitions := client.metadata[topic]

	if partitions == nil {
		return nil
	}

	ret := make([]int32, 0, len(partitions))
	for _, partition := range partitions {
		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
			continue
		}
		ret = append(ret, partition.ID)
	}

	sort.Sort(int32Slice(ret))
	return ret
}

func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
	client.lock.RLock()
	defer client.lock.RUnlock()

	partitions := client.metadata[topic]
	if partitions != nil {
		metadata, ok := partitions[partitionID]
		if ok {
			if metadata.Err == ErrLeaderNotAvailable {
				return nil, ErrLeaderNotAvailable
			}
			b := client.brokers[metadata.Leader]
			if b == nil {
				return nil, ErrLeaderNotAvailable
			}
			_ = b.Open(client.conf)
			return b, nil
		}
	}

	return nil, ErrUnknownTopicOrPartition
}

func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
	broker, err := client.Leader(topic, partitionID)
	if err != nil {
		return -1, err
	}

	request := &OffsetRequest{}
	if client.conf.Version.IsAtLeast(V0_10_1_0) {
		request.Version = 1
	}
	request.AddBlock(topic, partitionID, time, 1)

	response, err := broker.GetAvailableOffsets(request)
	if err != nil {
		_ = broker.Close()
		return -1, err
	}

	block := response.GetBlock(topic, partitionID)
	if block == nil {
		_ = broker.Close()
		return -1, ErrIncompleteResponse
	}
	if block.Err != ErrNoError {
		return -1, block.Err
	}
	if len(block.Offsets) != 1 {
		return -1, ErrOffsetOutOfRange
	}

	return block.Offsets[0], nil
}

// core metadata update logic

func (client *client) backgroundMetadataUpdater() {
	defer close(client.closed)

	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
		return
	}

	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if err := client.refreshMetadata(); err != nil {
				Logger.Println("Client background metadata update:", err)
			}
		case <-client.closer:
			return
		}
	}
}

func (client *client) refreshMetadata() error {
	topics := []string{}

	if !client.conf.Metadata.Full {
		if specificTopics, err := client.MetadataTopics(); err != nil {
			return err
		} else if len(specificTopics) == 0 {
			return ErrNoTopicsToUpdateMetadata
		} else {
			topics = specificTopics
		}
	}

	if err := client.RefreshMetadata(topics...); err != nil {
		return err
	}

	return nil
}

func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
	pastDeadline := func(backoff time.Duration) bool {
		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
			// we are past the deadline
			return true
		}
		return false
	}
	retry := func(err error) error {
		if attemptsRemaining > 0 {
			backoff := client.computeBackoff(attemptsRemaining)
			if pastDeadline(backoff) {
				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
				return err
			}
			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
			if backoff > 0 {
				time.Sleep(backoff)
			}
			return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
		}
		return err
	}

	broker := client.any()
	for ; broker != nil && !pastDeadline(0); broker = client.any() {
		allowAutoTopicCreation := true
		if len(topics) > 0 {
			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
		} else {
			allowAutoTopicCreation = false
			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
		}

		req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
		if client.conf.Version.IsAtLeast(V1_0_0_0) {
			req.Version = 5
		} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
			req.Version = 1
		}
		response, err := broker.GetMetadata(req)
		switch err.(type) {
		case nil:
			allKnownMetaData := len(topics) == 0
			// valid response, use it
			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
			if shouldRetry {
				Logger.Println("client/metadata found some partitions to be leaderless")
				return retry(err) // note: err can be nil
			}
			return err

		case PacketEncodingError:
			// didn't even send, return the error
			return err

		case KError:
			// if this is a SASL auth error, return it immediately as it _should_ be
			// non-retryable for all brokers
			if err.(KError) == ErrSASLAuthenticationFailed {
				Logger.Println("client/metadata failed SASL authentication")
				return err
			}

			if err.(KError) == ErrTopicAuthorizationFailed {
				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
				return err
			}
			// else remove that broker and try again
			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
			_ = broker.Close()
			client.deregisterBroker(broker)

		default:
			// some other error, remove that broker and try again
			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
			_ = broker.Close()
			client.deregisterBroker(broker)
		}
	}

	if broker != nil {
		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
		return retry(ErrOutOfBrokers)
	}

	Logger.Println("client/metadata no available broker to send metadata request to")
	client.resurrectDeadBrokers()
	return retry(ErrOutOfBrokers)
}

// updateMetadata stores the metadata from a MetadataResponse in the local cache. If there is
// no fatal error, it reports whether the caller should retry, e.g. because some partitions
// were found to be leaderless due to ErrLeaderNotAvailable.
func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
	if client.Closed() {
		return
	}

	client.lock.Lock()
	defer client.lock.Unlock()

	// For all the brokers we received:
	// - if it is a new ID, save it
	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
	// - otherwise ignore it, replacing our existing one would just bounce the connection
	for _, broker := range data.Brokers {
		client.registerBroker(broker)
	}

	client.controllerID = data.ControllerID

	if allKnownMetaData {
		client.metadata = make(map[string]map[int32]*PartitionMetadata)
		client.metadataTopics = make(map[string]none)
		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
	}
	for _, topic := range data.Topics {
		// topics must be added to `metadataTopics` first, to guarantee that all
		// requested topics are recorded and remain trackable for the periodic
		// metadata refresh.
		if _, exists := client.metadataTopics[topic.Name]; !exists {
			client.metadataTopics[topic.Name] = none{}
		}
		delete(client.metadata, topic.Name)
		delete(client.cachedPartitionsResults, topic.Name)

		switch topic.Err {
		case ErrNoError:
			// no-op
		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
			err = topic.Err
			continue
		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
			err = topic.Err
			retry = true
			continue
		case ErrLeaderNotAvailable: // retry, but store partial partition results
			retry = true
		default: // don't retry, don't store partial results
			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
			err = topic.Err
			continue
		}

		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
		for _, partition := range topic.Partitions {
			client.metadata[topic.Name][partition.ID] = partition
			if partition.Err == ErrLeaderNotAvailable {
				retry = true
			}
		}

		var partitionCache [maxPartitionIndex][]int32
		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
		client.cachedPartitionsResults[topic.Name] = partitionCache
	}

	return
}

func (client *client) cachedCoordinator(consumerGroup string) *Broker {
	client.lock.RLock()
	defer client.lock.RUnlock()
	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
		return client.brokers[coordinatorID]
	}
	return nil
}

func (client *client) cachedController() *Broker {
	client.lock.RLock()
	defer client.lock.RUnlock()

	return client.brokers[client.controllerID]
}

func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
	if client.conf.Metadata.Retry.BackoffFunc != nil {
		maxRetries := client.conf.Metadata.Retry.Max
		retries := maxRetries - attemptsRemaining
		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
	}
	return client.conf.Metadata.Retry.Backoff
}
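
// A minimal sketch of supplying a custom Metadata.Retry.BackoffFunc, which
// computeBackoff above prefers over the static Metadata.Retry.Backoff. The
// exponential curve below is illustrative only:
//
//	conf := sarama.NewConfig()
//	conf.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
//		// 100ms, 200ms, 400ms, ... for successive retries
//		return time.Duration(100*(1<<uint(retries))) * time.Millisecond
//	}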

func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
	retry := func(err error) (*FindCoordinatorResponse, error) {
		if attemptsRemaining > 0 {
			backoff := client.computeBackoff(attemptsRemaining)
			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
			time.Sleep(backoff)
			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
		}
		return nil, err
	}

	for broker := client.any(); broker != nil; broker = client.any() {
		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())

		request := new(FindCoordinatorRequest)
		request.CoordinatorKey = consumerGroup
		request.CoordinatorType = CoordinatorGroup

		response, err := broker.FindCoordinator(request)

		if err != nil {
			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)

			switch err.(type) {
			case PacketEncodingError:
				return nil, err
			default:
				_ = broker.Close()
				client.deregisterBroker(broker)
				continue
			}
		}

		switch response.Err {
		case ErrNoError:
			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
			return response, nil

		case ErrConsumerCoordinatorNotAvailable:
			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)

			// This is very ugly, but this scenario will only happen once per cluster.
			// The __consumer_offsets topic only has to be created one time.
			// The number of partitions is not configurable, but partition 0 should always exist.
			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
				time.Sleep(2 * time.Second)
			}

			return retry(ErrConsumerCoordinatorNotAvailable)
		case ErrGroupAuthorizationFailed:
			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
			return retry(ErrGroupAuthorizationFailed)

		default:
			return nil, response.Err
		}
	}

	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
	client.resurrectDeadBrokers()
	return retry(ErrOutOfBrokers)
}

// nopCloserClient embeds an existing Client but disables the Close method;
// all other methods pass through unchanged. This is for use in larger structs
// where it is undesirable to close the client that was passed in by the caller.
type nopCloserClient struct {
	Client
}

// Close intercepts and purposely does not call the underlying
// client's Close() method.
func (ncc *nopCloserClient) Close() error {
	return nil
}
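
// A minimal sketch of where this wrapper is useful: a constructor that accepts a
// caller-owned Client can wrap it so that its own cleanup path cannot close it
// (the constructor and struct names here are hypothetical):
//
//	func newThingFromClient(c Client) *thing {
//		return &thing{client: &nopCloserClient{c}}
//	}
//
//	// thing's own Close() may then safely call thing.client.Close() without
//	// shutting down the caller's broker connections.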