// Copyright 2015 The etcd Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package raft
16
17import (
18 "bytes"
19 "errors"
20 "fmt"
21 "math"
22 "math/rand"
23 "sort"
24 "strings"
25 "sync"
26 "time"
27
28 pb "go.etcd.io/etcd/raft/raftpb"
29)
30
31// None is a placeholder node ID used when there is no leader.
32const None uint64 = 0
33const noLimit = math.MaxUint64
34
35// Possible values for StateType.
36const (
37 StateFollower StateType = iota
38 StateCandidate
39 StateLeader
40 StatePreCandidate
41 numStates
42)
43
44type ReadOnlyOption int
45
46const (
47 // ReadOnlySafe guarantees the linearizability of the read only request by
48 // communicating with the quorum. It is the default and suggested option.
49 ReadOnlySafe ReadOnlyOption = iota
50 // ReadOnlyLeaseBased ensures linearizability of the read only request by
51 // relying on the leader lease. It can be affected by clock drift.
52 // If the clock drift is unbounded, leader might keep the lease longer than it
53 // should (clock can move backward/pause without any bound). ReadIndex is not safe
54 // in that case.
55 ReadOnlyLeaseBased
56)
57
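// Illustrative sketch (application-side, not part of this file): how a caller
// typically issues a linearizable read through the public Node interface,
// regardless of which ReadOnlyOption is configured. The request context value,
// ctx, and appliedIndex are assumptions for the example; Node.ReadIndex and
// Ready.ReadStates are real package APIs.
//
//	rctx := []byte("unique-read-id") // echoed back in ReadState.RequestCtx
//	if err := n.ReadIndex(ctx, rctx); err != nil {
//		// handle error
//	}
//	// Later, while draining Ready:
//	for _, rs := range rd.ReadStates {
//		if bytes.Equal(rs.RequestCtx, rctx) {
//			// Serve the read once appliedIndex >= rs.Index.
//		}
//	}
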
58// Possible values for CampaignType
59const (
60 // campaignPreElection represents the first phase of a normal election when
61 // Config.PreVote is true.
62 campaignPreElection CampaignType = "CampaignPreElection"
63 // campaignElection represents a normal (time-based) election (the second phase
64 // of the election when Config.PreVote is true).
65 campaignElection CampaignType = "CampaignElection"
66 // campaignTransfer represents the type of leader transfer
67 campaignTransfer CampaignType = "CampaignTransfer"
68)
69
// ErrProposalDropped is returned when a proposal is ignored in certain cases,
// so that the proposer can be notified and fail fast.
72var ErrProposalDropped = errors.New("raft proposal dropped")
73
74// lockedRand is a small wrapper around rand.Rand to provide
75// synchronization among multiple raft groups. Only the methods needed
76// by the code are exposed (e.g. Intn).
77type lockedRand struct {
78 mu sync.Mutex
79 rand *rand.Rand
80}
81
82func (r *lockedRand) Intn(n int) int {
83 r.mu.Lock()
84 v := r.rand.Intn(n)
85 r.mu.Unlock()
86 return v
87}
88
89var globalRand = &lockedRand{
90 rand: rand.New(rand.NewSource(time.Now().UnixNano())),
91}
92
// CampaignType represents the type of campaigning.
// We use a string rather than a uint64 because it is simpler to compare
// and to fill in raft entries.
96type CampaignType string
97
98// StateType represents the role of a node in a cluster.
99type StateType uint64
100
101var stmap = [...]string{
102 "StateFollower",
103 "StateCandidate",
104 "StateLeader",
105 "StatePreCandidate",
106}
107
108func (st StateType) String() string {
109 return stmap[uint64(st)]
110}
111
112// Config contains the parameters to start a raft.
113type Config struct {
114 // ID is the identity of the local raft. ID cannot be 0.
115 ID uint64
116
	// peers contains the IDs of all nodes (including self) in the raft cluster. It
	// should only be set when starting a new raft cluster. Restarting raft from a
	// previous configuration will panic if peers is set. peers is private and only
	// used for testing right now.
121 peers []uint64
122
	// learners contains the IDs of all learner nodes (including self if the
	// local node is a learner) in the raft cluster. A learner only receives
	// entries from the leader node; it does not vote or promote itself.
126 learners []uint64
127
	// ElectionTick is the number of Node.Tick invocations that must pass between
	// elections. That is, if a follower does not receive any message from the
	// leader of the current term before ElectionTick has elapsed, it will become a
	// candidate and start an election. ElectionTick must be greater than
	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
	// unnecessary leader switching.
134 ElectionTick int
135 // HeartbeatTick is the number of Node.Tick invocations that must pass between
136 // heartbeats. That is, a leader sends heartbeat messages to maintain its
137 // leadership every HeartbeatTick ticks.
138 HeartbeatTick int
139
	// Storage is the storage for raft. raft generates entries and states to be
	// stored in Storage, and reads persisted entries and states back out of it
	// when needed. When restarting, raft reads the previous state and
	// configuration out of Storage.
144 Storage Storage
	// Applied is the last applied index. It should only be set when restarting
	// raft. raft will not return entries at or below Applied to the application.
	// If Applied is unset when restarting, raft might return previously applied
	// entries. This is a very application-dependent configuration.
149 Applied uint64
150
	// MaxSizePerMsg limits the max byte size of each append message. A smaller
	// value lowers the raft recovery cost (initial probing and message loss
	// during normal operation). On the other hand, it might affect the
	// throughput during normal replication. Note: math.MaxUint64 for unlimited,
	// 0 for at most one entry per message.
156 MaxSizePerMsg uint64
	// MaxCommittedSizePerReady limits the total byte size of the committed
	// entries that can be returned for application in a single Ready.
159 MaxCommittedSizePerReady uint64
160 // MaxUncommittedEntriesSize limits the aggregate byte size of the
161 // uncommitted entries that may be appended to a leader's log. Once this
162 // limit is exceeded, proposals will begin to return ErrProposalDropped
163 // errors. Note: 0 for no limit.
164 MaxUncommittedEntriesSize uint64
	// MaxInflightMsgs limits the max number of in-flight append messages during
	// the optimistic replication phase. The application transport layer usually
	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
	// overflowing that sending buffer. TODO (xiangli): feedback to application to
	// limit the proposal rate?
170 MaxInflightMsgs int
171
172 // CheckQuorum specifies if the leader should check quorum activity. Leader
173 // steps down when quorum is not active for an electionTimeout.
174 CheckQuorum bool
175
176 // PreVote enables the Pre-Vote algorithm described in raft thesis section
177 // 9.6. This prevents disruption when a node that has been partitioned away
178 // rejoins the cluster.
179 PreVote bool
180
181 // ReadOnlyOption specifies how the read only request is processed.
182 //
183 // ReadOnlySafe guarantees the linearizability of the read only request by
184 // communicating with the quorum. It is the default and suggested option.
185 //
186 // ReadOnlyLeaseBased ensures linearizability of the read only request by
187 // relying on the leader lease. It can be affected by clock drift.
188 // If the clock drift is unbounded, leader might keep the lease longer than it
189 // should (clock can move backward/pause without any bound). ReadIndex is not safe
190 // in that case.
191 // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
192 ReadOnlyOption ReadOnlyOption
193
	// Logger is the logger used for raft logging. In a multi-node setup that
	// hosts multiple raft groups, each raft group can have its own logger.
196 Logger Logger
197
	// DisableProposalForwarding, when true, means that followers will drop
	// proposals rather than forwarding them to the leader. One use case for
	// this feature would be in a situation where the Raft leader is used to
	// compute the data of a proposal, for example, adding a timestamp from a
	// hybrid logical clock to data in a monotonically increasing way. Forwarding
	// should be disabled to prevent a follower with an inaccurate hybrid
	// logical clock from assigning the timestamp and then forwarding the data
	// to the leader.
206 DisableProposalForwarding bool
207}
208
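// Illustrative sketch (example values, not prescribed defaults): a minimal
// Config for a three-voter cluster started through the public StartNode API.
// The tick counts and size limits below are assumptions chosen for the example.
//
//	storage := NewMemoryStorage()
//	cfg := &Config{
//		ID:              0x01,
//		ElectionTick:    10, // ~10x HeartbeatTick, as suggested above
//		HeartbeatTick:   1,
//		Storage:         storage,
//		MaxSizePerMsg:   1024 * 1024,
//		MaxInflightMsgs: 256,
//	}
//	n := StartNode(cfg, []Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})
//	defer n.Stop()
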
209func (c *Config) validate() error {
210 if c.ID == None {
211 return errors.New("cannot use none as id")
212 }
213
214 if c.HeartbeatTick <= 0 {
215 return errors.New("heartbeat tick must be greater than 0")
216 }
217
218 if c.ElectionTick <= c.HeartbeatTick {
219 return errors.New("election tick must be greater than heartbeat tick")
220 }
221
222 if c.Storage == nil {
223 return errors.New("storage cannot be nil")
224 }
225
226 if c.MaxUncommittedEntriesSize == 0 {
227 c.MaxUncommittedEntriesSize = noLimit
228 }
229
230 // default MaxCommittedSizePerReady to MaxSizePerMsg because they were
231 // previously the same parameter.
232 if c.MaxCommittedSizePerReady == 0 {
233 c.MaxCommittedSizePerReady = c.MaxSizePerMsg
234 }
235
236 if c.MaxInflightMsgs <= 0 {
237 return errors.New("max inflight messages must be greater than 0")
238 }
239
240 if c.Logger == nil {
241 c.Logger = raftLogger
242 }
243
244 if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
245 return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
246 }
247
248 return nil
249}
250
251type raft struct {
252 id uint64
253
254 Term uint64
255 Vote uint64
256
257 readStates []ReadState
258
259 // the log
260 raftLog *raftLog
261
262 maxMsgSize uint64
263 maxUncommittedSize uint64
264 maxInflight int
265 prs map[uint64]*Progress
266 learnerPrs map[uint64]*Progress
267 matchBuf uint64Slice
268
269 state StateType
270
271 // isLearner is true if the local raft node is a learner.
272 isLearner bool
273
274 votes map[uint64]bool
275
276 msgs []pb.Message
277
278 // the leader id
279 lead uint64
280 // leadTransferee is id of the leader transfer target when its value is not zero.
281 // Follow the procedure defined in raft thesis 3.10.
282 leadTransferee uint64
283 // Only one conf change may be pending (in the log, but not yet
284 // applied) at a time. This is enforced via pendingConfIndex, which
285 // is set to a value >= the log index of the latest pending
286 // configuration change (if any). Config changes are only allowed to
287 // be proposed if the leader's applied index is greater than this
288 // value.
289 pendingConfIndex uint64
290 // an estimate of the size of the uncommitted tail of the Raft log. Used to
291 // prevent unbounded log growth. Only maintained by the leader. Reset on
292 // term changes.
293 uncommittedSize uint64
294
295 readOnly *readOnly
296
297 // number of ticks since it reached last electionTimeout when it is leader
298 // or candidate.
299 // number of ticks since it reached last electionTimeout or received a
300 // valid message from current leader when it is a follower.
301 electionElapsed int
302
303 // number of ticks since it reached last heartbeatTimeout.
304 // only leader keeps heartbeatElapsed.
305 heartbeatElapsed int
306
307 checkQuorum bool
308 preVote bool
309
310 heartbeatTimeout int
311 electionTimeout int
312 // randomizedElectionTimeout is a random number between
313 // [electiontimeout, 2 * electiontimeout - 1]. It gets reset
314 // when raft changes its state to follower or candidate.
315 randomizedElectionTimeout int
316 disableProposalForwarding bool
317
318 tick func()
319 step stepFunc
320
321 logger Logger
322}
323
324func newRaft(c *Config) *raft {
325 if err := c.validate(); err != nil {
326 panic(err.Error())
327 }
328 raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady)
329 hs, cs, err := c.Storage.InitialState()
330 if err != nil {
331 panic(err) // TODO(bdarnell)
332 }
333 peers := c.peers
334 learners := c.learners
335 if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
336 if len(peers) > 0 || len(learners) > 0 {
337 // TODO(bdarnell): the peers argument is always nil except in
338 // tests; the argument should be removed and these tests should be
339 // updated to specify their nodes through a snapshot.
340 panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
341 }
342 peers = cs.Nodes
343 learners = cs.Learners
344 }
345 r := &raft{
346 id: c.ID,
347 lead: None,
348 isLearner: false,
349 raftLog: raftlog,
350 maxMsgSize: c.MaxSizePerMsg,
351 maxInflight: c.MaxInflightMsgs,
352 maxUncommittedSize: c.MaxUncommittedEntriesSize,
353 prs: make(map[uint64]*Progress),
354 learnerPrs: make(map[uint64]*Progress),
355 electionTimeout: c.ElectionTick,
356 heartbeatTimeout: c.HeartbeatTick,
357 logger: c.Logger,
358 checkQuorum: c.CheckQuorum,
359 preVote: c.PreVote,
360 readOnly: newReadOnly(c.ReadOnlyOption),
361 disableProposalForwarding: c.DisableProposalForwarding,
362 }
363 for _, p := range peers {
364 r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
365 }
366 for _, p := range learners {
367 if _, ok := r.prs[p]; ok {
368 panic(fmt.Sprintf("node %x is in both learner and peer list", p))
369 }
370 r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
371 if r.id == p {
372 r.isLearner = true
373 }
374 }
375
376 if !isHardStateEqual(hs, emptyState) {
377 r.loadState(hs)
378 }
379 if c.Applied > 0 {
380 raftlog.appliedTo(c.Applied)
381 }
382 r.becomeFollower(r.Term, None)
383
384 var nodesStrs []string
385 for _, n := range r.nodes() {
386 nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
387 }
388
389 r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
390 r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
391 return r
392}
393
394func (r *raft) hasLeader() bool { return r.lead != None }
395
396func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
397
398func (r *raft) hardState() pb.HardState {
399 return pb.HardState{
400 Term: r.Term,
401 Vote: r.Vote,
402 Commit: r.raftLog.committed,
403 }
404}
405
406func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
407
408func (r *raft) nodes() []uint64 {
409 nodes := make([]uint64, 0, len(r.prs))
410 for id := range r.prs {
411 nodes = append(nodes, id)
412 }
413 sort.Sort(uint64Slice(nodes))
414 return nodes
415}
416
417func (r *raft) learnerNodes() []uint64 {
418 nodes := make([]uint64, 0, len(r.learnerPrs))
419 for id := range r.learnerPrs {
420 nodes = append(nodes, id)
421 }
422 sort.Sort(uint64Slice(nodes))
423 return nodes
424}
425
// send stamps the message with the local term where appropriate, validates it,
// and queues it in r.msgs; the application picks these messages up from Ready
// and is responsible for persisting state before delivering them.
427func (r *raft) send(m pb.Message) {
428 m.From = r.id
429 if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
430 if m.Term == 0 {
431 // All {pre-,}campaign messages need to have the term set when
432 // sending.
433 // - MsgVote: m.Term is the term the node is campaigning for,
434 // non-zero as we increment the term when campaigning.
435 // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
436 // granted, non-zero for the same reason MsgVote is
437 // - MsgPreVote: m.Term is the term the node will campaign,
438 // non-zero as we use m.Term to indicate the next term we'll be
439 // campaigning for
440 // - MsgPreVoteResp: m.Term is the term received in the original
441 // MsgPreVote if the pre-vote was granted, non-zero for the
442 // same reasons MsgPreVote is
443 panic(fmt.Sprintf("term should be set when sending %s", m.Type))
444 }
445 } else {
446 if m.Term != 0 {
447 panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
448 }
449 // do not attach term to MsgProp, MsgReadIndex
450 // proposals are a way to forward to the leader and
451 // should be treated as local message.
452 // MsgReadIndex is also forwarded to leader.
453 if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
454 m.Term = r.Term
455 }
456 }
457 r.msgs = append(r.msgs, m)
458}
459
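// Illustrative sketch of the contract implied by send: messages queued in
// r.msgs surface to the application through Ready, and the application must
// persist HardState and Entries before transmitting Messages. The storage and
// transport names below are assumptions for the example.
//
//	for rd := range n.Ready() {
//		storage.Append(rd.Entries)             // persist new log entries first
//		if !IsEmptyHardState(rd.HardState) {
//			storage.SetHardState(rd.HardState) // persist term/vote/commit
//		}
//		transport.Send(rd.Messages)            // only then deliver outgoing messages
//		// ... apply rd.CommittedEntries to the state machine ...
//		n.Advance()
//	}
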
460func (r *raft) getProgress(id uint64) *Progress {
461 if pr, ok := r.prs[id]; ok {
462 return pr
463 }
464
465 return r.learnerPrs[id]
466}
467
468// sendAppend sends an append RPC with new entries (if any) and the
469// current commit index to the given peer.
470func (r *raft) sendAppend(to uint64) {
471 r.maybeSendAppend(to, true)
472}
473
474// maybeSendAppend sends an append RPC with new entries to the given peer,
475// if necessary. Returns true if a message was sent. The sendIfEmpty
476// argument controls whether messages with no entries will be sent
477// ("empty" messages are useful to convey updated Commit indexes, but
478// are undesirable when we're sending multiple messages in a batch).
479func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool {
480 pr := r.getProgress(to)
481 if pr.IsPaused() {
482 return false
483 }
484 m := pb.Message{}
485 m.To = to
486
487 term, errt := r.raftLog.term(pr.Next - 1)
488 ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
489 if len(ents) == 0 && !sendIfEmpty {
490 return false
491 }
492
493 if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
494 if !pr.RecentActive {
495 r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
496 return false
497 }
498
499 m.Type = pb.MsgSnap
500 snapshot, err := r.raftLog.snapshot()
501 if err != nil {
502 if err == ErrSnapshotTemporarilyUnavailable {
503 r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
504 return false
505 }
506 panic(err) // TODO(bdarnell)
507 }
508 if IsEmptySnap(snapshot) {
509 panic("need non-empty snapshot")
510 }
511 m.Snapshot = snapshot
512 sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
513 r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
514 r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
515 pr.becomeSnapshot(sindex)
516 r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
517 } else {
518 m.Type = pb.MsgApp
519 m.Index = pr.Next - 1
520 m.LogTerm = term
521 m.Entries = ents
522 m.Commit = r.raftLog.committed
523 if n := len(m.Entries); n != 0 {
524 switch pr.State {
525 // optimistically increase the next when in ProgressStateReplicate
526 case ProgressStateReplicate:
527 last := m.Entries[n-1].Index
528 pr.optimisticUpdate(last)
529 pr.ins.add(last)
530 case ProgressStateProbe:
531 pr.pause()
532 default:
533 r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
534 }
535 }
536 }
537 r.send(m)
538 return true
539}
540
541// sendHeartbeat sends a heartbeat RPC to the given peer.
542func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
	// Attach the commit as min(to.matched, r.committed).
	// When the leader sends out a heartbeat message,
	// the receiver (follower) might not be matched with the leader,
	// or it might not have all the committed entries.
	// The leader MUST NOT forward the follower's commit to
	// an unmatched index.
549 commit := min(r.getProgress(to).Match, r.raftLog.committed)
550 m := pb.Message{
551 To: to,
552 Type: pb.MsgHeartbeat,
553 Commit: commit,
554 Context: ctx,
555 }
556
557 r.send(m)
558}
559
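// Worked example (illustrative numbers): if the leader's committed index is 42
// but the follower's Match is only 37, the heartbeat carries Commit = 37
// (min(37, 42)), so the follower never learns a commit index beyond what it is
// known to have replicated.
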
560func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
561 for id, pr := range r.prs {
562 f(id, pr)
563 }
564
565 for id, pr := range r.learnerPrs {
566 f(id, pr)
567 }
568}
569
// bcastAppend sends an append RPC, with entries, to every peer that is not
// up-to-date according to the progress recorded in r.prs.
572func (r *raft) bcastAppend() {
573 r.forEachProgress(func(id uint64, _ *Progress) {
574 if id == r.id {
575 return
576 }
577
578 r.sendAppend(id)
579 })
580}
581
// bcastHeartbeat sends a heartbeat RPC, without entries, to all the peers.
583func (r *raft) bcastHeartbeat() {
584 lastCtx := r.readOnly.lastPendingRequestCtx()
585 if len(lastCtx) == 0 {
586 r.bcastHeartbeatWithCtx(nil)
587 } else {
588 r.bcastHeartbeatWithCtx([]byte(lastCtx))
589 }
590}
591
592func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
593 r.forEachProgress(func(id uint64, _ *Progress) {
594 if id == r.id {
595 return
596 }
597 r.sendHeartbeat(id, ctx)
598 })
599}
600
601// maybeCommit attempts to advance the commit index. Returns true if
602// the commit index changed (in which case the caller should call
603// r.bcastAppend).
604func (r *raft) maybeCommit() bool {
605 // Preserving matchBuf across calls is an optimization
606 // used to avoid allocating a new slice on each call.
607 if cap(r.matchBuf) < len(r.prs) {
608 r.matchBuf = make(uint64Slice, len(r.prs))
609 }
610 mis := r.matchBuf[:len(r.prs)]
611 idx := 0
612 for _, p := range r.prs {
613 mis[idx] = p.Match
614 idx++
615 }
616 sort.Sort(mis)
617 mci := mis[len(mis)-r.quorum()]
618 return r.raftLog.maybeCommit(mci, r.Term)
619}
620
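// Worked example (illustrative numbers): with five voters whose Match indexes
// are [20, 15, 12, 9, 9], quorum() is 3 and the sorted slice is
// [9, 9, 12, 15, 20]; mis[len(mis)-quorum()] = mis[2] = 12, so index 12 is the
// highest index known to be replicated on a majority and is the candidate for
// commitment (subject to the term check in raftLog.maybeCommit).
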
621func (r *raft) reset(term uint64) {
622 if r.Term != term {
623 r.Term = term
624 r.Vote = None
625 }
626 r.lead = None
627
628 r.electionElapsed = 0
629 r.heartbeatElapsed = 0
630 r.resetRandomizedElectionTimeout()
631
632 r.abortLeaderTransfer()
633
634 r.votes = make(map[uint64]bool)
635 r.forEachProgress(func(id uint64, pr *Progress) {
636 *pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
637 if id == r.id {
638 pr.Match = r.raftLog.lastIndex()
639 }
640 })
641
642 r.pendingConfIndex = 0
643 r.uncommittedSize = 0
644 r.readOnly = newReadOnly(r.readOnly.option)
645}
646
647func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
648 li := r.raftLog.lastIndex()
649 for i := range es {
650 es[i].Term = r.Term
651 es[i].Index = li + 1 + uint64(i)
652 }
653 // Track the size of this uncommitted proposal.
654 if !r.increaseUncommittedSize(es) {
655 r.logger.Debugf(
656 "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal",
657 r.id,
658 )
659 // Drop the proposal.
660 return false
661 }
662 // use latest "last" index after truncate/append
663 li = r.raftLog.append(es...)
664 r.getProgress(r.id).maybeUpdate(li)
665 // Regardless of maybeCommit's return, our caller will call bcastAppend.
666 r.maybeCommit()
667 return true
668}
669
670// tickElection is run by followers and candidates after r.electionTimeout.
671func (r *raft) tickElection() {
672 r.electionElapsed++
673
674 if r.promotable() && r.pastElectionTimeout() {
675 r.electionElapsed = 0
676 r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
677 }
678}
679
680// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
681func (r *raft) tickHeartbeat() {
682 r.heartbeatElapsed++
683 r.electionElapsed++
684
685 if r.electionElapsed >= r.electionTimeout {
686 r.electionElapsed = 0
687 if r.checkQuorum {
688 r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
689 }
		// If the current leader cannot transfer leadership within an electionTimeout,
		// it abandons the transfer and resumes normal operation as leader.
691 if r.state == StateLeader && r.leadTransferee != None {
692 r.abortLeaderTransfer()
693 }
694 }
695
696 if r.state != StateLeader {
697 return
698 }
699
700 if r.heartbeatElapsed >= r.heartbeatTimeout {
701 r.heartbeatElapsed = 0
702 r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
703 }
704}
705
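// Illustrative sketch (application-side, not part of this file): both tick
// functions above are driven by the application calling Node.Tick at a fixed
// interval. The interval length and the handleReady helper are assumptions
// chosen for the example.
//
//	ticker := time.NewTicker(100 * time.Millisecond) // 1 tick = 100ms here
//	defer ticker.Stop()
//	for {
//		select {
//		case <-ticker.C:
//			n.Tick() // advances electionElapsed / heartbeatElapsed
//		case rd := <-n.Ready():
//			handleReady(rd) // persist, send, apply, then n.Advance()
//		}
//	}
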
706func (r *raft) becomeFollower(term uint64, lead uint64) {
707 r.step = stepFollower
708 r.reset(term)
709 r.tick = r.tickElection
710 r.lead = lead
711 r.state = StateFollower
712 r.logger.Infof("%x became follower at term %d", r.id, r.Term)
713}
714
715func (r *raft) becomeCandidate() {
716 // TODO(xiangli) remove the panic when the raft implementation is stable
717 if r.state == StateLeader {
718 panic("invalid transition [leader -> candidate]")
719 }
720 r.step = stepCandidate
721 r.reset(r.Term + 1)
722 r.tick = r.tickElection
723 r.Vote = r.id
724 r.state = StateCandidate
725 r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
726}
727
728func (r *raft) becomePreCandidate() {
729 // TODO(xiangli) remove the panic when the raft implementation is stable
730 if r.state == StateLeader {
731 panic("invalid transition [leader -> pre-candidate]")
732 }
733 // Becoming a pre-candidate changes our step functions and state,
734 // but doesn't change anything else. In particular it does not increase
735 // r.Term or change r.Vote.
736 r.step = stepCandidate
737 r.votes = make(map[uint64]bool)
738 r.tick = r.tickElection
739 r.lead = None
740 r.state = StatePreCandidate
741 r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
742}
743
744func (r *raft) becomeLeader() {
745 // TODO(xiangli) remove the panic when the raft implementation is stable
746 if r.state == StateFollower {
747 panic("invalid transition [follower -> leader]")
748 }
749 r.step = stepLeader
750 r.reset(r.Term)
751 r.tick = r.tickHeartbeat
752 r.lead = r.id
753 r.state = StateLeader
754 // Followers enter replicate mode when they've been successfully probed
755 // (perhaps after having received a snapshot as a result). The leader is
756 // trivially in this state. Note that r.reset() has initialized this
757 // progress with the last index already.
758 r.prs[r.id].becomeReplicate()
759
760 // Conservatively set the pendingConfIndex to the last index in the
761 // log. There may or may not be a pending config change, but it's
762 // safe to delay any future proposals until we commit all our
763 // pending log entries, and scanning the entire tail of the log
764 // could be expensive.
765 r.pendingConfIndex = r.raftLog.lastIndex()
766
767 emptyEnt := pb.Entry{Data: nil}
768 if !r.appendEntry(emptyEnt) {
769 // This won't happen because we just called reset() above.
770 r.logger.Panic("empty entry was dropped")
771 }
772 // As a special case, don't count the initial empty entry towards the
773 // uncommitted log quota. This is because we want to preserve the
774 // behavior of allowing one entry larger than quota if the current
775 // usage is zero.
776 r.reduceUncommittedSize([]pb.Entry{emptyEnt})
777 r.logger.Infof("%x became leader at term %d", r.id, r.Term)
778}
779
780func (r *raft) campaign(t CampaignType) {
781 var term uint64
782 var voteMsg pb.MessageType
783 if t == campaignPreElection {
784 r.becomePreCandidate()
785 voteMsg = pb.MsgPreVote
786 // PreVote RPCs are sent for the next term before we've incremented r.Term.
787 term = r.Term + 1
788 } else {
789 r.becomeCandidate()
790 voteMsg = pb.MsgVote
791 term = r.Term
792 }
793 if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
794 // We won the election after voting for ourselves (which must mean that
795 // this is a single-node cluster). Advance to the next state.
796 if t == campaignPreElection {
797 r.campaign(campaignElection)
798 } else {
799 r.becomeLeader()
800 }
801 return
802 }
803 for id := range r.prs {
804 if id == r.id {
805 continue
806 }
807 r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
808 r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
809
810 var ctx []byte
811 if t == campaignTransfer {
812 ctx = []byte(t)
813 }
814 r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
815 }
816}
817
818func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
819 if v {
820 r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
821 } else {
822 r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
823 }
824 if _, ok := r.votes[id]; !ok {
825 r.votes[id] = v
826 }
827 for _, vv := range r.votes {
828 if vv {
829 granted++
830 }
831 }
832 return granted
833}
834
835func (r *raft) Step(m pb.Message) error {
836 // Handle the message term, which may result in our stepping down to a follower.
837 switch {
838 case m.Term == 0:
839 // local message
840 case m.Term > r.Term:
841 if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
842 force := bytes.Equal(m.Context, []byte(campaignTransfer))
843 inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
844 if !force && inLease {
845 // If a server receives a RequestVote request within the minimum election timeout
846 // of hearing from a current leader, it does not update its term or grant its vote
847 r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
848 r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
849 return nil
850 }
851 }
852 switch {
853 case m.Type == pb.MsgPreVote:
854 // Never change our term in response to a PreVote
855 case m.Type == pb.MsgPreVoteResp && !m.Reject:
856 // We send pre-vote requests with a term in our future. If the
857 // pre-vote is granted, we will increment our term when we get a
858 // quorum. If it is not, the term comes from the node that
859 // rejected our vote so we should become a follower at the new
860 // term.
861 default:
862 r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
863 r.id, r.Term, m.Type, m.From, m.Term)
864 if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
865 r.becomeFollower(m.Term, m.From)
866 } else {
867 r.becomeFollower(m.Term, None)
868 }
869 }
870
871 case m.Term < r.Term:
872 if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
873 // We have received messages from a leader at a lower term. It is possible
874 // that these messages were simply delayed in the network, but this could
875 // also mean that this node has advanced its term number during a network
876 // partition, and it is now unable to either win an election or to rejoin
877 // the majority on the old term. If checkQuorum is false, this will be
878 // handled by incrementing term numbers in response to MsgVote with a
879 // higher term, but if checkQuorum is true we may not advance the term on
880 // MsgVote and must generate other messages to advance the term. The net
881 // result of these two features is to minimize the disruption caused by
882 // nodes that have been removed from the cluster's configuration: a
883 // removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
884 // but it will not receive MsgApp or MsgHeartbeat, so it will not create
			// disruptive term increases by notifying the leader of its own activeness.
			// The above comments also apply to Pre-Vote.
			//
			// When a follower gets isolated, it soon starts an election and ends
			// up with a higher term than the leader, although it won't receive enough
			// votes to win the election. When it regains connectivity, its response
			// with "pb.MsgAppResp" carrying the higher term forces the leader to step
			// down. However, this disruption is inevitable to free the stuck node
			// with a fresh election. It can be prevented with the Pre-Vote phase.
894 r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
895 } else if m.Type == pb.MsgPreVote {
			// Before Pre-Vote was enabled, there may be candidates with a higher term
			// but a shorter log. After upgrading to Pre-Vote, the cluster may deadlock
			// if we drop messages with a lower term.
899 r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
900 r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
901 r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
902 } else {
903 // ignore other cases
904 r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
905 r.id, r.Term, m.Type, m.From, m.Term)
906 }
907 return nil
908 }
909
910 switch m.Type {
911 case pb.MsgHup:
912 if r.state != StateLeader {
913 ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
914 if err != nil {
915 r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
916 }
917 if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
918 r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
919 return nil
920 }
921
922 r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
923 if r.preVote {
924 r.campaign(campaignPreElection)
925 } else {
926 r.campaign(campaignElection)
927 }
928 } else {
929 r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
930 }
931
932 case pb.MsgVote, pb.MsgPreVote:
933 if r.isLearner {
934 // TODO: learner may need to vote, in case of node down when confchange.
935 r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
936 r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
937 return nil
938 }
939 // We can vote if this is a repeat of a vote we've already cast...
940 canVote := r.Vote == m.From ||
941 // ...we haven't voted and we don't think there's a leader yet in this term...
942 (r.Vote == None && r.lead == None) ||
943 // ...or this is a PreVote for a future term...
944 (m.Type == pb.MsgPreVote && m.Term > r.Term)
945 // ...and we believe the candidate is up to date.
946 if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
947 r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
948 r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
949 // When responding to Msg{Pre,}Vote messages we include the term
950 // from the message, not the local term. To see why, consider the
951 // case where a single node was previously partitioned away and
			// its local term is now out of date. If we include the local term
953 // (recall that for pre-votes we don't update the local term), the
954 // (pre-)campaigning node on the other end will proceed to ignore
955 // the message (it ignores all out of date messages).
956 // The term in the original message and current local term are the
957 // same in the case of regular votes, but different for pre-votes.
958 r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
959 if m.Type == pb.MsgVote {
960 // Only record real votes.
961 r.electionElapsed = 0
962 r.Vote = m.From
963 }
964 } else {
965 r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
966 r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
967 r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
968 }
969
970 default:
971 err := r.step(r, m)
972 if err != nil {
973 return err
974 }
975 }
976 return nil
977}
978
979type stepFunc func(r *raft, m pb.Message) error
980
981func stepLeader(r *raft, m pb.Message) error {
982 // These message types do not require any progress for m.From.
983 switch m.Type {
984 case pb.MsgBeat:
985 r.bcastHeartbeat()
986 return nil
987 case pb.MsgCheckQuorum:
988 if !r.checkQuorumActive() {
989 r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
990 r.becomeFollower(r.Term, None)
991 }
992 return nil
993 case pb.MsgProp:
994 if len(m.Entries) == 0 {
995 r.logger.Panicf("%x stepped empty MsgProp", r.id)
996 }
997 if _, ok := r.prs[r.id]; !ok {
998 // If we are not currently a member of the range (i.e. this node
999 // was removed from the configuration while serving as leader),
1000 // drop any new proposals.
1001 return ErrProposalDropped
1002 }
1003 if r.leadTransferee != None {
1004 r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
1005 return ErrProposalDropped
1006 }
1007
1008 for i, e := range m.Entries {
1009 if e.Type == pb.EntryConfChange {
1010 if r.pendingConfIndex > r.raftLog.applied {
1011 r.logger.Infof("propose conf %s ignored since pending unapplied configuration [index %d, applied %d]",
1012 e.String(), r.pendingConfIndex, r.raftLog.applied)
1013 m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
1014 } else {
1015 r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
1016 }
1017 }
1018 }
1019
1020 if !r.appendEntry(m.Entries...) {
1021 return ErrProposalDropped
1022 }
1023 r.bcastAppend()
1024 return nil
1025 case pb.MsgReadIndex:
1026 if r.quorum() > 1 {
1027 if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
1028 // Reject read only request when this leader has not committed any log entry at its term.
1029 return nil
1030 }
1031
			// thinking: use an internally defined context instead of the user-given context.
1033 // We can express this in terms of the term and index instead of a user-supplied value.
1034 // This would allow multiple reads to piggyback on the same message.
1035 switch r.readOnly.option {
1036 case ReadOnlySafe:
1037 r.readOnly.addRequest(r.raftLog.committed, m)
1038 r.bcastHeartbeatWithCtx(m.Entries[0].Data)
1039 case ReadOnlyLeaseBased:
1040 ri := r.raftLog.committed
1041 if m.From == None || m.From == r.id { // from local member
1042 r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
1043 } else {
1044 r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
1045 }
1046 }
1047 } else { // there is only one voting member (the leader) in the cluster
1048 if m.From == None || m.From == r.id { // from leader itself
1049 r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
1050 } else { // from learner member
1051 r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: r.raftLog.committed, Entries: m.Entries})
1052 }
1053 }
1054
1055 return nil
1056 }
1057
1058 // All other message types require a progress for m.From (pr).
1059 pr := r.getProgress(m.From)
1060 if pr == nil {
1061 r.logger.Debugf("%x no progress available for %x", r.id, m.From)
1062 return nil
1063 }
1064 switch m.Type {
1065 case pb.MsgAppResp:
1066 pr.RecentActive = true
1067
1068 if m.Reject {
1069 r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
1070 r.id, m.RejectHint, m.From, m.Index)
1071 if pr.maybeDecrTo(m.Index, m.RejectHint) {
1072 r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
1073 if pr.State == ProgressStateReplicate {
1074 pr.becomeProbe()
1075 }
1076 r.sendAppend(m.From)
1077 }
1078 } else {
1079 oldPaused := pr.IsPaused()
1080 if pr.maybeUpdate(m.Index) {
1081 switch {
1082 case pr.State == ProgressStateProbe:
1083 pr.becomeReplicate()
1084 case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
1085 r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
1086 // Transition back to replicating state via probing state
1087 // (which takes the snapshot into account). If we didn't
1088 // move to replicating state, that would only happen with
1089 // the next round of appends (but there may not be a next
1090 // round for a while, exposing an inconsistent RaftStatus).
1091 pr.becomeProbe()
1092 pr.becomeReplicate()
1093 case pr.State == ProgressStateReplicate:
1094 pr.ins.freeTo(m.Index)
1095 }
1096
1097 if r.maybeCommit() {
1098 r.bcastAppend()
1099 } else if oldPaused {
1100 // If we were paused before, this node may be missing the
1101 // latest commit index, so send it.
1102 r.sendAppend(m.From)
1103 }
1104 // We've updated flow control information above, which may
1105 // allow us to send multiple (size-limited) in-flight messages
1106 // at once (such as when transitioning from probe to
1107 // replicate, or when freeTo() covers multiple messages). If
1108 // we have more entries to send, send as many messages as we
1109 // can (without sending empty messages for the commit index)
1110 for r.maybeSendAppend(m.From, false) {
1111 }
1112 // Transfer leadership is in progress.
1113 if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
1114 r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
1115 r.sendTimeoutNow(m.From)
1116 }
1117 }
1118 }
1119 case pb.MsgHeartbeatResp:
1120 pr.RecentActive = true
1121 pr.resume()
1122
1123 // free one slot for the full inflights window to allow progress.
1124 if pr.State == ProgressStateReplicate && pr.ins.full() {
1125 pr.ins.freeFirstOne()
1126 }
1127 if pr.Match < r.raftLog.lastIndex() {
1128 r.sendAppend(m.From)
1129 }
1130
1131 if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
1132 return nil
1133 }
1134
1135 ackCount := r.readOnly.recvAck(m)
1136 if ackCount < r.quorum() {
1137 return nil
1138 }
1139
1140 rss := r.readOnly.advance(m)
1141 for _, rs := range rss {
1142 req := rs.req
1143 if req.From == None || req.From == r.id { // from local member
1144 r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
1145 } else {
1146 r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
1147 }
1148 }
1149 case pb.MsgSnapStatus:
1150 if pr.State != ProgressStateSnapshot {
1151 return nil
1152 }
1153 if !m.Reject {
1154 pr.becomeProbe()
1155 r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
1156 } else {
1157 pr.snapshotFailure()
1158 pr.becomeProbe()
1159 r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
1160 }
		// If the snapshot finished, wait for the msgAppResp from the remote node
		// before sending out the next msgApp.
		// If the snapshot failed, wait for a heartbeat interval before the next attempt.
1164 pr.pause()
1165 case pb.MsgUnreachable:
1166 // During optimistic replication, if the remote becomes unreachable,
		// there is a high probability that a MsgApp was lost.
1168 if pr.State == ProgressStateReplicate {
1169 pr.becomeProbe()
1170 }
1171 r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
1172 case pb.MsgTransferLeader:
1173 if pr.IsLearner {
1174 r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
1175 return nil
1176 }
1177 leadTransferee := m.From
1178 lastLeadTransferee := r.leadTransferee
1179 if lastLeadTransferee != None {
1180 if lastLeadTransferee == leadTransferee {
1181 r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
1182 r.id, r.Term, leadTransferee, leadTransferee)
1183 return nil
1184 }
1185 r.abortLeaderTransfer()
1186 r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
1187 }
1188 if leadTransferee == r.id {
1189 r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
1190 return nil
1191 }
1192 // Transfer leadership to third party.
1193 r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
1194 // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
1195 r.electionElapsed = 0
1196 r.leadTransferee = leadTransferee
1197 if pr.Match == r.raftLog.lastIndex() {
1198 r.sendTimeoutNow(leadTransferee)
1199 r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
1200 } else {
1201 r.sendAppend(leadTransferee)
1202 }
1203 }
1204 return nil
1205}
1206
1207// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
1208// whether they respond to MsgVoteResp or MsgPreVoteResp.
1209func stepCandidate(r *raft, m pb.Message) error {
1210 // Only handle vote responses corresponding to our candidacy (while in
1211 // StateCandidate, we may get stale MsgPreVoteResp messages in this term from
1212 // our pre-candidate state).
1213 var myVoteRespType pb.MessageType
1214 if r.state == StatePreCandidate {
1215 myVoteRespType = pb.MsgPreVoteResp
1216 } else {
1217 myVoteRespType = pb.MsgVoteResp
1218 }
1219 switch m.Type {
1220 case pb.MsgProp:
1221 r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
1222 return ErrProposalDropped
1223 case pb.MsgApp:
1224 r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
1225 r.handleAppendEntries(m)
1226 case pb.MsgHeartbeat:
1227 r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
1228 r.handleHeartbeat(m)
1229 case pb.MsgSnap:
1230 r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
1231 r.handleSnapshot(m)
1232 case myVoteRespType:
1233 gr := r.poll(m.From, m.Type, !m.Reject)
1234 r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
1235 switch r.quorum() {
1236 case gr:
1237 if r.state == StatePreCandidate {
1238 r.campaign(campaignElection)
1239 } else {
1240 r.becomeLeader()
1241 r.bcastAppend()
1242 }
1243 case len(r.votes) - gr:
1244 // pb.MsgPreVoteResp contains future term of pre-candidate
1245 // m.Term > r.Term; reuse r.Term
1246 r.becomeFollower(r.Term, None)
1247 }
1248 case pb.MsgTimeoutNow:
1249 r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
1250 }
1251 return nil
1252}
1253
1254func stepFollower(r *raft, m pb.Message) error {
1255 switch m.Type {
1256 case pb.MsgProp:
1257 if r.lead == None {
1258 r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
1259 return ErrProposalDropped
1260 } else if r.disableProposalForwarding {
1261 r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
1262 return ErrProposalDropped
1263 }
1264 m.To = r.lead
1265 r.send(m)
1266 case pb.MsgApp:
1267 r.electionElapsed = 0
1268 r.lead = m.From
1269 r.handleAppendEntries(m)
1270 case pb.MsgHeartbeat:
1271 r.electionElapsed = 0
1272 r.lead = m.From
1273 r.handleHeartbeat(m)
1274 case pb.MsgSnap:
1275 r.electionElapsed = 0
1276 r.lead = m.From
1277 r.handleSnapshot(m)
1278 case pb.MsgTransferLeader:
1279 if r.lead == None {
1280 r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
1281 return nil
1282 }
1283 m.To = r.lead
1284 r.send(m)
1285 case pb.MsgTimeoutNow:
1286 if r.promotable() {
1287 r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
1288 // Leadership transfers never use pre-vote even if r.preVote is true; we
1289 // know we are not recovering from a partition so there is no need for the
1290 // extra round trip.
1291 r.campaign(campaignTransfer)
1292 } else {
1293 r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
1294 }
1295 case pb.MsgReadIndex:
1296 if r.lead == None {
1297 r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
1298 return nil
1299 }
1300 m.To = r.lead
1301 r.send(m)
1302 case pb.MsgReadIndexResp:
1303 if len(m.Entries) != 1 {
1304 r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
1305 return nil
1306 }
1307 r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
1308 }
1309 return nil
1310}
1311
1312func (r *raft) handleAppendEntries(m pb.Message) {
1313 if m.Index < r.raftLog.committed {
1314 r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
1315 return
1316 }
1317
1318 if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
1319 r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
1320 } else {
1321 r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
1322 r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
1323 r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
1324 }
1325}
1326
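// Worked example (illustrative numbers): a leader probing with Next=31 sends
// m.Index=30; a follower whose last index is only 24 rejects with Index=30 and
// RejectHint=24. On the leader, maybeDecrTo(30, 24) lowers Next to 25, so the
// next probe starts at index 24, which the follower can actually verify.
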
1327func (r *raft) handleHeartbeat(m pb.Message) {
1328 r.raftLog.commitTo(m.Commit)
1329 r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
1330}
1331
1332func (r *raft) handleSnapshot(m pb.Message) {
1333 sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
1334 if r.restore(m.Snapshot) {
1335 r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
1336 r.id, r.raftLog.committed, sindex, sterm)
1337 r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
1338 } else {
1339 r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
1340 r.id, r.raftLog.committed, sindex, sterm)
1341 r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
1342 }
1343}
1344
1345// restore recovers the state machine from a snapshot. It restores the log and the
// configuration of the state machine.
1347func (r *raft) restore(s pb.Snapshot) bool {
1348 if s.Metadata.Index <= r.raftLog.committed {
1349 return false
1350 }
1351 if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
1352 r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
1353 r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
1354 r.raftLog.commitTo(s.Metadata.Index)
1355 return false
1356 }
1357
	// A voting peer can't become a learner.
1359 if !r.isLearner {
1360 for _, id := range s.Metadata.ConfState.Learners {
1361 if id == r.id {
1362 r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
1363 return false
1364 }
1365 }
1366 }
1367
1368 r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
1369 r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
1370
1371 r.raftLog.restore(s)
1372 r.prs = make(map[uint64]*Progress)
1373 r.learnerPrs = make(map[uint64]*Progress)
1374 r.restoreNode(s.Metadata.ConfState.Nodes, false)
1375 r.restoreNode(s.Metadata.ConfState.Learners, true)
1376 return true
1377}
1378
1379func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
1380 for _, n := range nodes {
1381 match, next := uint64(0), r.raftLog.lastIndex()+1
1382 if n == r.id {
1383 match = next - 1
1384 r.isLearner = isLearner
1385 }
1386 r.setProgress(n, match, next, isLearner)
1387 r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
1388 }
1389}
1390
// promotable indicates whether the state machine can be promoted to leader,
// which is true when its own id is in the progress list.
1393func (r *raft) promotable() bool {
1394 _, ok := r.prs[r.id]
1395 return ok
1396}
1397
1398func (r *raft) addNode(id uint64) {
1399 r.addNodeOrLearnerNode(id, false)
1400}
1401
1402func (r *raft) addLearner(id uint64) {
1403 r.addNodeOrLearnerNode(id, true)
1404}
1405
1406func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
1407 pr := r.getProgress(id)
1408 if pr == nil {
1409 r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
1410 } else {
1411 if isLearner && !pr.IsLearner {
1412 // can only change Learner to Voter
1413 r.logger.Infof("%x ignored addLearner: do not support changing %x from raft peer to learner.", r.id, id)
1414 return
1415 }
1416
1417 if isLearner == pr.IsLearner {
1418 // Ignore any redundant addNode calls (which can happen because the
1419 // initial bootstrapping entries are applied twice).
1420 return
1421 }
1422
1423 // change Learner to Voter, use origin Learner progress
1424 delete(r.learnerPrs, id)
1425 pr.IsLearner = false
1426 r.prs[id] = pr
1427 }
1428
1429 if r.id == id {
1430 r.isLearner = isLearner
1431 }
1432
1433 // When a node is first added, we should mark it as recently active.
1434 // Otherwise, CheckQuorum may cause us to step down if it is invoked
1435 // before the added node has a chance to communicate with us.
1436 pr = r.getProgress(id)
1437 pr.RecentActive = true
1438}
1439
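// Illustrative sketch (application-side): addNode/addLearner/removeNode are
// reached by proposing a ConfChange and then feeding the committed entry back
// through Node.ApplyConfChange. The ctx and ent values, and the learner ID 4,
// are assumptions for the example.
//
//	cc := pb.ConfChange{Type: pb.ConfChangeAddLearnerNode, NodeID: 4}
//	_ = n.ProposeConfChange(ctx, cc)
//	// ... later, while applying committed entries from Ready:
//	if ent.Type == pb.EntryConfChange {
//		var cc pb.ConfChange
//		_ = cc.Unmarshal(ent.Data)
//		state := n.ApplyConfChange(cc) // ends up in addNode/addLearner/removeNode
//		_ = state
//	}
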
1440func (r *raft) removeNode(id uint64) {
1441 r.delProgress(id)
1442
	// do not try to commit or abort transferring if there are no nodes in the cluster.
1444 if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
1445 return
1446 }
1447
1448 // The quorum size is now smaller, so see if any pending entries can
1449 // be committed.
1450 if r.maybeCommit() {
1451 r.bcastAppend()
1452 }
1453 // If the removed node is the leadTransferee, then abort the leadership transferring.
1454 if r.state == StateLeader && r.leadTransferee == id {
1455 r.abortLeaderTransfer()
1456 }
1457}
1458
1459func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
1460 if !isLearner {
1461 delete(r.learnerPrs, id)
1462 r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
1463 return
1464 }
1465
1466 if _, ok := r.prs[id]; ok {
1467 panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
1468 }
1469 r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
1470}
1471
1472func (r *raft) delProgress(id uint64) {
1473 delete(r.prs, id)
1474 delete(r.learnerPrs, id)
1475}
1476
1477func (r *raft) loadState(state pb.HardState) {
1478 if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
1479 r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
1480 }
1481 r.raftLog.committed = state.Commit
1482 r.Term = state.Term
1483 r.Vote = state.Vote
1484}
1485
1486// pastElectionTimeout returns true iff r.electionElapsed is greater
1487// than or equal to the randomized election timeout in
1488// [electiontimeout, 2 * electiontimeout - 1].
1489func (r *raft) pastElectionTimeout() bool {
1490 return r.electionElapsed >= r.randomizedElectionTimeout
1491}
1492
1493func (r *raft) resetRandomizedElectionTimeout() {
1494 r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
1495}
1496
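// Worked example (illustrative numbers): with electionTimeout = 10 ticks,
// randomizedElectionTimeout is drawn uniformly from [10, 19], so two followers
// rarely time out on the same tick and split the vote.
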
1497// checkQuorumActive returns true if the quorum is active from
1498// the view of the local raft state machine. Otherwise, it returns
1499// false.
1500// checkQuorumActive also resets all RecentActive to false.
1501func (r *raft) checkQuorumActive() bool {
1502 var act int
1503
1504 r.forEachProgress(func(id uint64, pr *Progress) {
1505 if id == r.id { // self is always active
1506 act++
1507 return
1508 }
1509
1510 if pr.RecentActive && !pr.IsLearner {
1511 act++
1512 }
1513
1514 pr.RecentActive = false
1515 })
1516
1517 return act >= r.quorum()
1518}
1519
1520func (r *raft) sendTimeoutNow(to uint64) {
1521 r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
1522}
1523
1524func (r *raft) abortLeaderTransfer() {
1525 r.leadTransferee = None
1526}
1527
1528// increaseUncommittedSize computes the size of the proposed entries and
// determines whether they would push the leader over its maxUncommittedSize limit.
1530// If the new entries would exceed the limit, the method returns false. If not,
1531// the increase in uncommitted entry size is recorded and the method returns
1532// true.
1533func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool {
1534 var s uint64
1535 for _, e := range ents {
1536 s += uint64(PayloadSize(e))
1537 }
1538
1539 if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize {
1540 // If the uncommitted tail of the Raft log is empty, allow any size
1541 // proposal. Otherwise, limit the size of the uncommitted tail of the
1542 // log and drop any proposal that would push the size over the limit.
1543 return false
1544 }
1545 r.uncommittedSize += s
1546 return true
1547}
1548
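// Worked example (illustrative numbers): with maxUncommittedSize = 1024 and an
// uncommitted tail of 1000 bytes, a 100-byte proposal is rejected
// (1000+100 > 1024). If the tail is empty (uncommittedSize == 0), even a
// 5000-byte proposal is accepted, preserving the "allow one entry larger than
// quota when usage is zero" behavior noted in becomeLeader.
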
1549// reduceUncommittedSize accounts for the newly committed entries by decreasing
1550// the uncommitted entry size limit.
1551func (r *raft) reduceUncommittedSize(ents []pb.Entry) {
1552 if r.uncommittedSize == 0 {
1553 // Fast-path for followers, who do not track or enforce the limit.
1554 return
1555 }
1556
1557 var s uint64
1558 for _, e := range ents {
1559 s += uint64(PayloadSize(e))
1560 }
1561 if s > r.uncommittedSize {
1562 // uncommittedSize may underestimate the size of the uncommitted Raft
1563 // log tail but will never overestimate it. Saturate at 0 instead of
1564 // allowing overflow.
1565 r.uncommittedSize = 0
1566 } else {
1567 r.uncommittedSize -= s
1568 }
1569}
1570
1571func numOfPendingConf(ents []pb.Entry) int {
1572 n := 0
1573 for i := range ents {
1574 if ents[i].Type == pb.EntryConfChange {
1575 n++
1576 }
1577 }
1578 return n
1579}