// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"fmt"
	"log"
	"sort"
	"sync"
	"time"

	"go.etcd.io/etcd/etcdserver/api/membership"
	"go.etcd.io/etcd/etcdserver/api/rafthttp"
	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
	"go.etcd.io/etcd/pkg/contention"
	"go.etcd.io/etcd/pkg/logutil"
	"go.etcd.io/etcd/pkg/pbutil"
	"go.etcd.io/etcd/pkg/types"
	"go.etcd.io/etcd/raft"
	"go.etcd.io/etcd/raft/raftpb"
	"go.etcd.io/etcd/wal"
	"go.etcd.io/etcd/wal/walpb"
	"go.uber.org/zap"
)

const (
	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024
	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// indirection for the expvar func interface;
	// expvar panics when publishing a duplicate name,
	// and it does not support removing a registered name,
	// so only register a func that calls raftStatus
	// and change raftStatus as we need.
	raftStatus func() raft.Status
)

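// init registers the "raft.status" expvar; the published func reads the
// current raftStatus under raftStatusMu so the status source can be swapped
// whenever a node is (re)started.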
func init() {
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// notifyc before assuming the raft messages are stable.
type apply struct {
	entries  []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node
	notifyc chan struct{}
}

type raftNode struct {
	lg *zap.Logger

	tickMu *sync.Mutex
	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detectors for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done    chan struct{}
}

type raftNodeConfig struct {
	lg *zap.Logger

	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage     Storage
	heartbeat   time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}

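// newRaftNode builds a raftNode from the given config: it installs the raft
// logger, sets up the heartbeat contention detector and channels, and creates
// the ticker (a no-op ticker when heartbeat is zero).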
func newRaftNode(cfg raftNodeConfig) *raftNode {
	var lg raft.Logger
	if cfg.lg != nil {
		lg = logutil.NewRaftLoggerZap(cfg.lg)
	} else {
		lcfg := logutil.DefaultZapLoggerConfig
		var err error
		lg, err = logutil.NewRaftLogger(&lcfg)
		if err != nil {
			log.Fatalf("cannot create raft logger %v", err)
		}
	}
	raft.SetLogger(lg)
	r := &raftNode{
		lg:             cfg.lg,
		tickMu:         new(sync.Mutex),
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc:     make(chan apply),
		stopped:    make(chan struct{}),
		done:       make(chan struct{}),
	}
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

// raft.Node has no internal locking in the raft package, so tick
// serializes calls to Tick with tickMu.
func (r *raftNode) tick() {
	r.tickMu.Lock()
	r.Tick()
	r.tickMu.Unlock()
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					rh.updateLead(rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					if islead {
						isLeader.Set(1)
					} else {
						isLeader.Set(0)
					}
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						if r.lg != nil {
							r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
						} else {
							plog.Warningf("timed out sending read state")
						}
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries:  rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc:  notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// the leader can write to its disk in parallel with replicating to the followers
				// and the followers writing to their disks.
				// For more details, check raft thesis 10.2.1
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					if r.lg != nil {
						r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
					} else {
						plog.Fatalf("raft save state and entries error: %v", err)
					}
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						if r.lg != nil {
							r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
						} else {
							plog.Fatalf("raft save snapshot error: %v", err)
						}
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}

					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					if r.lg != nil {
						r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
					} else {
						plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					}
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal notifyc
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also, a slow machine's follower raft layer could proceed to become the leader
					// of its own single-node cluster before the apply layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessary long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}

				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}

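// updateCommittedIndex reports the highest index carried by the apply, taken
// from its last committed entry or its snapshot, to the raftReadyHandler.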
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

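// processMessages mutates outgoing messages before they are handed to the
// transport: messages to removed members and all but the latest MsgAppResp
// are dropped (To is set to 0), MsgSnap is redirected to msgSnapC so the
// server can merge the v2 store and v3 KV snapshots, and slow heartbeat
// sends are logged and counted.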
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of the store without KV.
			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				if r.lg != nil {
					r.lg.Warn(
						"leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk",
						zap.String("to", fmt.Sprintf("%x", ms[i].To)),
						zap.Duration("heartbeat-interval", r.heartbeat),
						zap.Duration("expected-duration", 2*r.heartbeat),
						zap.Duration("exceeded-duration", exceed),
					)
				} else {
					plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To)
					plog.Warningf("server is likely overloaded")
				}
				heartbeatSendFailures.Inc()
			}
		}
	}
	return ms
}

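// apply returns the channel on which committed entries and snapshots are
// delivered to the etcd server's apply loop.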
func (r *raftNode) apply() chan apply {
	return r.applyc
}

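// stop signals the goroutine started by start to exit and blocks until it
// has finished shutting down.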
func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

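// onStop tears down the raftNode: it stops the raft.Node, the ticker, and the
// transport, closes the Raft storage, and finally closes the done channel.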
func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		if r.lg != nil {
			r.lg.Panic("failed to close Raft storage", zap.Error(err))
		} else {
			plog.Panicf("raft close storage error: %v", err)
		}
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicks advances the ticks of the Raft node.
// This can be used for fast-forwarding election
// ticks in multi-data-center deployments, thus
// speeding up the election process.
func (r *raftNode) advanceTicks(ticks int) {
	for i := 0; i < ticks; i++ {
		r.tick()
	}
}

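// startNode creates the WAL for a new member, marshals the initial cluster
// members into peer contexts, and starts a raft.Node backed by a fresh
// MemoryStorage (RestartNode is used when no peers are given).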
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID:    uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Panic("failed to create WAL", zap.Error(err))
		} else {
			plog.Panicf("create wal error: %v", err)
		}
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		var ctx []byte
		ctx, err = json.Marshal((*cl).Member(id))
		if err != nil {
			if cfg.Logger != nil {
				cfg.Logger.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	if cfg.Logger != nil {
		cfg.Logger.Info(
			"starting local member",
			zap.String("local-member-id", id.String()),
			zap.String("cluster-id", cl.ID().String()),
		)
	} else {
		plog.Infof("starting member %s in cluster %s", id, cl.ID())
	}
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		if cfg.LoggerConfig != nil {
			c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
			if err != nil {
				log.Fatalf("cannot create raft logger %v", err)
			}
		} else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
			c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
		}
	}

	if len(peers) == 0 {
		n = raft.RestartNode(c)
	} else {
		n = raft.StartNode(c, peers)
	}
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, n, s, w
}

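// restartNode replays the existing WAL (past the given snapshot, if any),
// rebuilds the in-memory Raft storage and cluster metadata, and restarts the
// raft.Node for a member rejoining its cluster.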
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

	if cfg.Logger != nil {
		cfg.Logger.Info(
			"restarting local member",
			zap.String("cluster-id", cid.String()),
			zap.String("local-member-id", id.String()),
			zap.Uint64("commit-index", st.Commit),
		)
	} else {
		plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	}
	cl := membership.NewCluster(cfg.Logger, "")
	cl.SetID(id, cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		var err error
		if cfg.LoggerConfig != nil {
			c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
			if err != nil {
				log.Fatalf("cannot create raft logger %v", err)
			}
		} else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
			c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
		}
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

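// restartAsStandaloneNode restarts a member as a single-node cluster (the
// force-new-cluster path): it replays the WAL, discards uncommitted entries,
// and force-appends config-change entries that remove every other member.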
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			if cfg.Logger != nil {
				cfg.Logger.Info(
					"discarding uncommitted WAL entries",
					zap.Uint64("entry-index", ent.Index),
					zap.Uint64("commit-index-from-wal", st.Commit),
					zap.Int("number-of-discarded-entries", len(ents)-i),
				)
			} else {
				plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
			}
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(
		cfg.Logger,
		getIDs(cfg.Logger, snapshot, ents),
		uint64(id),
		st.Term,
		st.Commit,
	)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		if cfg.Logger != nil {
			cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
		} else {
			plog.Fatalf("%v", err)
		}
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	if cfg.Logger != nil {
		cfg.Logger.Info(
			"forcing restart member",
			zap.String("cluster-id", cid.String()),
			zap.String("local-member-id", id.String()),
			zap.Uint64("commit-index", st.Commit),
		)
	} else {
		plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	}

	cl := membership.NewCluster(cfg.Logger, "")
	cl.SetID(id, cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID:              uint64(id),
		ElectionTick:    cfg.ElectionTicks,
		HeartbeatTick:   1,
		Storage:         s,
		MaxSizePerMsg:   maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum:     true,
		PreVote:         cfg.PreVote,
	}
	if cfg.Logger != nil {
		// called after capnslog setting in "init" function
		if cfg.LoggerConfig != nil {
			c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
			if err != nil {
				log.Fatalf("cannot create raft logger %v", err)
			}
		} else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
			c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
		}
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Voters {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			if lg != nil {
				lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
			} else {
				plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
			}
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	found := false
	for _, id := range ids {
		if id == self {
			found = true
		}
	}

	var ents []raftpb.Entry
	next := index + 1

	// NB: always add self first, then remove other nodes. Raft will panic if the
	// set of voters ever becomes empty.
	if !found {
		m := membership.Member{
			ID:             types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			if lg != nil {
				lg.Panic("failed to marshal member", zap.Error(err))
			} else {
				plog.Panicf("marshal member should never fail: %v", err)
			}
		}
		cc := &raftpb.ConfChange{
			Type:    raftpb.ConfChangeAddNode,
			NodeID:  self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}

	for _, id := range ids {
		if id == self {
			continue
		}
		cc := &raftpb.ConfChange{
			Type:   raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type:  raftpb.EntryConfChange,
			Data:  pbutil.MustMarshal(cc),
			Term:  term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}

	return ents
}