[VOL-2235] Mocks and interfaces for rw-core
This update adds the mocks used by the rw-core during unit
testing, together with the interfaces those tests depend on.
Change-Id: I20ca1455c358113c3aa897acc6355e0ddbc614b7
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/backend.go b/vendor/go.etcd.io/etcd/mvcc/backend/backend.go
new file mode 100644
index 0000000..bffd749
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/backend.go
@@ -0,0 +1,570 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ humanize "github.com/dustin/go-humanize"
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+var (
+ defaultBatchLimit = 10000
+ defaultBatchInterval = 100 * time.Millisecond
+
+ defragLimit = 10000
+
+ // initialMmapSize is the initial size of the mmapped region. Setting this larger than
+ // the potential max db size can prevent writers from blocking readers.
+ // This only works on Linux.
+ initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "mvcc/backend")
+
+ // minSnapshotWarningTimeout is the minimum threshold to trigger a long-running snapshot warning.
+ minSnapshotWarningTimeout = 30 * time.Second
+)
+
+type Backend interface {
+ // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523.
+ ReadTx() ReadTx
+ BatchTx() BatchTx
+ // ConcurrentReadTx returns a non-blocking read transaction.
+ ConcurrentReadTx() ReadTx
+
+ Snapshot() Snapshot
+ Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
+ // Size returns the current size of the backend physically allocated.
+ // The backend can hold DB space that is not utilized at the moment,
+ // since it can conduct pre-allocation or spare unused space for recycling.
+ // Use SizeInUse() instead for the actual DB size.
+ Size() int64
+ // SizeInUse returns the current size of the backend logically in use.
+ // Since the backend can manage free space in a non-byte unit such as
+ // number of pages, the returned value may not be exact in bytes.
+ SizeInUse() int64
+ // OpenReadTxN returns the number of currently open read transactions in the backend.
+ OpenReadTxN() int64
+ Defrag() error
+ ForceCommit()
+ Close() error
+}
+
+type Snapshot interface {
+ // Size gets the size of the snapshot.
+ Size() int64
+ // WriteTo writes the snapshot into the given writer.
+ WriteTo(w io.Writer) (n int64, err error)
+ // Close closes the snapshot.
+ Close() error
+}
+
+type backend struct {
+ // size and commits are used with atomic operations so they must be
+ // 64-bit aligned, otherwise 32-bit tests will crash
+
+ // size is the number of bytes allocated in the backend
+ size int64
+ // sizeInUse is the number of bytes actually used in the backend
+ sizeInUse int64
+ // commits counts number of commits since start
+ commits int64
+ // openReadTxN is the number of currently open read transactions in the backend
+ openReadTxN int64
+
+ mu sync.RWMutex
+ db *bolt.DB
+
+ batchInterval time.Duration
+ batchLimit int
+ batchTx *batchTxBuffered
+
+ readTx *readTx
+
+ stopc chan struct{}
+ donec chan struct{}
+
+ lg *zap.Logger
+}
+
+type BackendConfig struct {
+ // Path is the file path to the backend file.
+ Path string
+ // BatchInterval is the maximum time before flushing the BatchTx.
+ BatchInterval time.Duration
+ // BatchLimit is the maximum puts before flushing the BatchTx.
+ BatchLimit int
+ // BackendFreelistType is the backend boltdb's freelist type.
+ BackendFreelistType bolt.FreelistType
+ // MmapSize is the number of bytes to mmap for the backend.
+ MmapSize uint64
+ // Logger logs backend-side operations.
+ Logger *zap.Logger
+}
+
+func DefaultBackendConfig() BackendConfig {
+ return BackendConfig{
+ BatchInterval: defaultBatchInterval,
+ BatchLimit: defaultBatchLimit,
+ MmapSize: initialMmapSize,
+ }
+}
+
+func New(bcfg BackendConfig) Backend {
+ return newBackend(bcfg)
+}
+
+func NewDefaultBackend(path string) Backend {
+ bcfg := DefaultBackendConfig()
+ bcfg.Path = path
+ return newBackend(bcfg)
+}
+
+func newBackend(bcfg BackendConfig) *backend {
+ bopts := &bolt.Options{}
+ if boltOpenOptions != nil {
+ *bopts = *boltOpenOptions
+ }
+ bopts.InitialMmapSize = bcfg.mmapSize()
+ bopts.FreelistType = bcfg.BackendFreelistType
+
+ db, err := bolt.Open(bcfg.Path, 0600, bopts)
+ if err != nil {
+ if bcfg.Logger != nil {
+ bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err))
+ } else {
+ plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
+ }
+ }
+
+ // In the future, we may want to make buffering optional for low-concurrency systems
+ // or to dynamically swap between buffered/non-buffered depending on workload.
+ b := &backend{
+ db: db,
+
+ batchInterval: bcfg.BatchInterval,
+ batchLimit: bcfg.BatchLimit,
+
+ readTx: &readTx{
+ buf: txReadBuffer{
+ txBuffer: txBuffer{make(map[string]*bucketBuffer)},
+ },
+ buckets: make(map[string]*bolt.Bucket),
+ txWg: new(sync.WaitGroup),
+ },
+
+ stopc: make(chan struct{}),
+ donec: make(chan struct{}),
+
+ lg: bcfg.Logger,
+ }
+ b.batchTx = newBatchTxBuffered(b)
+ go b.run()
+ return b
+}
+
+// BatchTx returns the current batch tx in the coalescer. The tx can be used for read and
+// write operations. The write result can be retrieved within the same tx immediately.
+// The write result is isolated from other txs until the current one gets committed.
+func (b *backend) BatchTx() BatchTx {
+ return b.batchTx
+}
+
+func (b *backend) ReadTx() ReadTx { return b.readTx }
+
+// ConcurrentReadTx creates and returns a new ReadTx, which:
+// A) creates and keeps a copy of backend.readTx.txReadBuffer,
+// B) references the boltdb read Tx (and its bucket cache) of current batch interval.
+func (b *backend) ConcurrentReadTx() ReadTx {
+ b.readTx.RLock()
+ defer b.readTx.RUnlock()
+ // prevent the boltdb read Tx from being rolled back until the store read Tx is done. Needs to be called when holding readTx.RLock().
+ b.readTx.txWg.Add(1)
+ // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval.
+ return &concurrentReadTx{
+ buf: b.readTx.buf.unsafeCopy(),
+ tx: b.readTx.tx,
+ txMu: &b.readTx.txMu,
+ buckets: b.readTx.buckets,
+ txWg: b.readTx.txWg,
+ }
+}
+
+// ForceCommit forces the current batching tx to commit.
+func (b *backend) ForceCommit() {
+ b.batchTx.Commit()
+}
+
+func (b *backend) Snapshot() Snapshot {
+ b.batchTx.Commit()
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ tx, err := b.db.Begin(false)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot begin tx (%s)", err)
+ }
+ }
+
+ stopc, donec := make(chan struct{}), make(chan struct{})
+ dbBytes := tx.Size()
+ go func() {
+ defer close(donec)
+ // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection
+ // assuming a min tcp throughput of 100MB/s.
+ var sendRateBytes int64 = 100 * 1024 * 1024
+ warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+ if warningTimeout < minSnapshotWarningTimeout {
+ warningTimeout = minSnapshotWarningTimeout
+ }
+ start := time.Now()
+ ticker := time.NewTicker(warningTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ if b.lg != nil {
+ b.lg.Warn(
+ "snapshotting taking too long to transfer",
+ zap.Duration("taking", time.Since(start)),
+ zap.Int64("bytes", dbBytes),
+ zap.String("size", humanize.Bytes(uint64(dbBytes))),
+ )
+ } else {
+ plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start)
+ }
+
+ case <-stopc:
+ snapshotTransferSec.Observe(time.Since(start).Seconds())
+ return
+ }
+ }
+ }()
+
+ return &snapshot{tx, stopc, donec}
+}
+
+type IgnoreKey struct {
+ Bucket string
+ Key string
+}
+
+func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ err := b.db.View(func(tx *bolt.Tx) error {
+ c := tx.Cursor()
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("cannot get hash of bucket %s", string(next))
+ }
+ h.Write(next)
+ b.ForEach(func(k, v []byte) error {
+ bk := IgnoreKey{Bucket: string(next), Key: string(k)}
+ if _, ok := ignores[bk]; !ok {
+ h.Write(k)
+ h.Write(v)
+ }
+ return nil
+ })
+ }
+ return nil
+ })
+
+ if err != nil {
+ return 0, err
+ }
+
+ return h.Sum32(), nil
+}
+
+func (b *backend) Size() int64 {
+ return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) SizeInUse() int64 {
+ return atomic.LoadInt64(&b.sizeInUse)
+}
+
+func (b *backend) run() {
+ defer close(b.donec)
+ t := time.NewTimer(b.batchInterval)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ case <-b.stopc:
+ b.batchTx.CommitAndStop()
+ return
+ }
+ if b.batchTx.safePending() != 0 {
+ b.batchTx.Commit()
+ }
+ t.Reset(b.batchInterval)
+ }
+}
+
+func (b *backend) Close() error {
+ close(b.stopc)
+ <-b.donec
+ return b.db.Close()
+}
+
+// Commits returns total number of commits since start
+func (b *backend) Commits() int64 {
+ return atomic.LoadInt64(&b.commits)
+}
+
+func (b *backend) Defrag() error {
+ return b.defrag()
+}
+
+func (b *backend) defrag() error {
+ now := time.Now()
+
+ // TODO: make this non-blocking?
+ // lock batchTx to ensure nobody is using previous tx, and then
+ // close previous ongoing tx.
+ b.batchTx.Lock()
+ defer b.batchTx.Unlock()
+
+ // lock the database after locking the tx to avoid deadlock.
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // block concurrent read requests while resetting tx
+ b.readTx.Lock()
+ defer b.readTx.Unlock()
+
+ b.batchTx.unsafeCommit(true)
+
+ b.batchTx.tx = nil
+
+ tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
+ if err != nil {
+ return err
+ }
+
+ dbp := b.db.Path()
+ tdbp := tmpdb.Path()
+ size1, sizeInUse1 := b.Size(), b.SizeInUse()
+ if b.lg != nil {
+ b.lg.Info(
+ "defragmenting",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes", size1),
+ zap.String("current-db-size", humanize.Bytes(uint64(size1))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
+ )
+ }
+
+ err = defragdb(b.db, tmpdb, defragLimit)
+ if err != nil {
+ tmpdb.Close()
+ os.RemoveAll(tmpdb.Path())
+ return err
+ }
+
+ err = b.db.Close()
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to close database", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot close database (%s)", err)
+ }
+ }
+ err = tmpdb.Close()
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to close tmp database", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot close database (%s)", err)
+ }
+ }
+ err = os.Rename(tdbp, dbp)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to rename tmp database", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot rename database (%s)", err)
+ }
+ }
+
+ b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err))
+ } else {
+ plog.Panicf("cannot open database at %s (%v)", dbp, err)
+ }
+ }
+ b.batchTx.tx = b.unsafeBegin(true)
+
+ b.readTx.reset()
+ b.readTx.tx = b.unsafeBegin(false)
+
+ size := b.readTx.tx.Size()
+ db := b.readTx.tx.DB()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+ took := time.Since(now)
+ defragSec.Observe(took.Seconds())
+
+ size2, sizeInUse2 := b.Size(), b.SizeInUse()
+ if b.lg != nil {
+ b.lg.Info(
+ "defragmented",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes-diff", size2-size1),
+ zap.Int64("current-db-size-bytes", size2),
+ zap.String("current-db-size", humanize.Bytes(uint64(size2))),
+ zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
+ zap.Duration("took", took),
+ )
+ }
+ return nil
+}
+
+func defragdb(odb, tmpdb *bolt.DB, limit int) error {
+ // open a tx on tmpdb for writes
+ tmptx, err := tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+
+ // open a tx on old db for read
+ tx, err := odb.Begin(false)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ c := tx.Cursor()
+
+ count := 0
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
+ }
+
+ tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+ if berr != nil {
+ return berr
+ }
+ tmpb.FillPercent = 0.9 // for sequential writes in the ForEach below
+
+ if err = b.ForEach(func(k, v []byte) error {
+ count++
+ if count > limit {
+ err = tmptx.Commit()
+ if err != nil {
+ return err
+ }
+ tmptx, err = tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ tmpb = tmptx.Bucket(next)
+ tmpb.FillPercent = 0.9 // for sequential writes in the ForEach below
+
+ count = 0
+ }
+ return tmpb.Put(k, v)
+ }); err != nil {
+ return err
+ }
+ }
+
+ return tmptx.Commit()
+}
+
+func (b *backend) begin(write bool) *bolt.Tx {
+ b.mu.RLock()
+ tx := b.unsafeBegin(write)
+ b.mu.RUnlock()
+
+ size := tx.Size()
+ db := tx.DB()
+ stats := db.Stats()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize)))
+ atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN))
+
+ return tx
+}
+
+func (b *backend) unsafeBegin(write bool) *bolt.Tx {
+ tx, err := b.db.Begin(write)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot begin tx (%s)", err)
+ }
+ }
+ return tx
+}
+
+func (b *backend) OpenReadTxN() int64 {
+ return atomic.LoadInt64(&b.openReadTxN)
+}
+
+// NewTmpBackend creates a backend implementation for testing.
+func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
+ dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
+ if err != nil {
+ panic(err)
+ }
+ tmpPath := filepath.Join(dir, "database")
+ bcfg := DefaultBackendConfig()
+ bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
+ return newBackend(bcfg), tmpPath
+}
+
+func NewDefaultTmpBackend() (*backend, string) {
+ return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
+}
+
+type snapshot struct {
+ *bolt.Tx
+ stopc chan struct{}
+ donec chan struct{}
+}
+
+func (s *snapshot) Close() error {
+ close(s.stopc)
+ <-s.donec
+ return s.Tx.Rollback()
+}
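
Taken together, the Backend, BatchTx, and ReadTx surfaces above are easiest to see in a small usage sketch. The snippet below is illustrative only and not part of this change; it assumes the vendored import path and uses NewTmpBackend, which this file exposes for tests.

    package backend_test

    import (
        "fmt"
        "time"

        "go.etcd.io/etcd/mvcc/backend"
    )

    func ExampleBackend() {
        // NewTmpBackend creates a throwaway bolt file for tests.
        b, path := backend.NewTmpBackend(100*time.Millisecond, 10000)
        defer b.Close()
        fmt.Println("backend at", path)

        // All writes go through the batch tx; the lock must be held
        // around the Unsafe* calls.
        tx := b.BatchTx()
        tx.Lock()
        tx.UnsafeCreateBucket([]byte("key"))
        tx.UnsafePut([]byte("key"), []byte("foo"), []byte("bar"))
        tx.Unlock()

        // Commit the pending batch now instead of waiting for the
        // batch interval to expire.
        b.ForceCommit()

        // Reads go through the shared read tx.
        rt := b.ReadTx()
        rt.RLock()
        _, vals := rt.UnsafeRange([]byte("key"), []byte("foo"), nil, 0)
        rt.RUnlock()
        fmt.Println(string(vals[0])) // "bar"
    }
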
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go b/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go
new file mode 100644
index 0000000..d5c8a88
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go
@@ -0,0 +1,339 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+type BatchTx interface {
+ ReadTx
+ UnsafeCreateBucket(name []byte)
+ UnsafePut(bucketName []byte, key []byte, value []byte)
+ UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
+ UnsafeDelete(bucketName []byte, key []byte)
+ // Commit commits a previous tx and begins a new writable one.
+ Commit()
+ // CommitAndStop commits the previous tx and does not create a new one.
+ CommitAndStop()
+}
+
+type batchTx struct {
+ sync.Mutex
+ tx *bolt.Tx
+ backend *backend
+
+ pending int
+}
+
+func (t *batchTx) Lock() {
+ t.Mutex.Lock()
+}
+
+func (t *batchTx) Unlock() {
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ t.Mutex.Unlock()
+}
+
+// BatchTx interface embeds ReadTx interface. But RLock() and RUnlock() do not
+// have appropriate semantics in the BatchTx interface, so they should not be called.
+// TODO: might want to decouple ReadTx and BatchTx
+
+func (t *batchTx) RLock() {
+ panic("unexpected RLock")
+}
+
+func (t *batchTx) RUnlock() {
+ panic("unexpected RUnlock")
+}
+
+func (t *batchTx) UnsafeCreateBucket(name []byte) {
+ _, err := t.tx.CreateBucket(name)
+ if err != nil && err != bolt.ErrBucketExists {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to create a bucket",
+ zap.String("bucket-name", string(name)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot create bucket %s (%v)", name, err)
+ }
+ }
+ t.pending++
+}
+
+// UnsafePut must be called holding the lock on the tx.
+func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
+ t.unsafePut(bucketName, key, value, false)
+}
+
+// UnsafeSeqPut must be called holding the lock on the tx.
+func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
+ t.unsafePut(bucketName, key, value, true)
+}
+
+func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
+ bucket := t.tx.Bucket(bucketName)
+ if bucket == nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ )
+ } else {
+ plog.Fatalf("bucket %s does not exist", bucketName)
+ }
+ }
+ if seq {
+ // it is useful to increase fill percent when the workloads are mostly append-only.
+ // this can delay the page split and reduce space usage.
+ bucket.FillPercent = 0.9
+ }
+ if err := bucket.Put(key, value); err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to write to a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot put key into bucket (%v)", err)
+ }
+ }
+ t.pending++
+}
+
+// UnsafeRange must be called holding the lock on the tx.
+func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ bucket := t.tx.Bucket(bucketName)
+ if bucket == nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ )
+ } else {
+ plog.Fatalf("bucket %s does not exist", bucketName)
+ }
+ }
+ return unsafeRange(bucket.Cursor(), key, endKey, limit)
+}
+
+func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ var isMatch func(b []byte) bool
+ if len(endKey) > 0 {
+ isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
+ } else {
+ isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
+ limit = 1
+ }
+
+ for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
+ vs = append(vs, cv)
+ keys = append(keys, ck)
+ if limit == int64(len(keys)) {
+ break
+ }
+ }
+ return keys, vs
+}
+
+// UnsafeDelete must be called holding the lock on the tx.
+func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
+ bucket := t.tx.Bucket(bucketName)
+ if bucket == nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ )
+ } else {
+ plog.Fatalf("bucket %s does not exist", bucketName)
+ }
+ }
+ err := bucket.Delete(key)
+ if err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to delete a key",
+ zap.String("bucket-name", string(bucketName)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot delete key from bucket (%v)", err)
+ }
+ }
+ t.pending++
+}
+
+// UnsafeForEach must be called holding the lock on the tx.
+func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ return unsafeForEach(t.tx, bucketName, visitor)
+}
+
+func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
+ if b := tx.Bucket(bucket); b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// Commit commits a previous tx and begins a new writable one.
+func (t *batchTx) Commit() {
+ t.Lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+// CommitAndStop commits the previous tx and does not create a new one.
+func (t *batchTx) CommitAndStop() {
+ t.Lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTx) safePending() int {
+ t.Mutex.Lock()
+ defer t.Mutex.Unlock()
+ return t.pending
+}
+
+func (t *batchTx) commit(stop bool) {
+ // commit the last tx
+ if t.tx != nil {
+ if t.pending == 0 && !stop {
+ return
+ }
+
+ start := time.Now()
+
+ // gofail: var beforeCommit struct{}
+ err := t.tx.Commit()
+ // gofail: var afterCommit struct{}
+
+ rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds())
+ spillSec.Observe(t.tx.Stats().SpillTime.Seconds())
+ writeSec.Observe(t.tx.Stats().WriteTime.Seconds())
+ commitSec.Observe(time.Since(start).Seconds())
+ atomic.AddInt64(&t.backend.commits, 1)
+
+ t.pending = 0
+ if err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal("failed to commit tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot commit tx (%s)", err)
+ }
+ }
+ }
+ if !stop {
+ t.tx = t.backend.begin(true)
+ }
+}
+
+type batchTxBuffered struct {
+ batchTx
+ buf txWriteBuffer
+}
+
+func newBatchTxBuffered(backend *backend) *batchTxBuffered {
+ tx := &batchTxBuffered{
+ batchTx: batchTx{backend: backend},
+ buf: txWriteBuffer{
+ txBuffer: txBuffer{make(map[string]*bucketBuffer)},
+ seq: true,
+ },
+ }
+ tx.Commit()
+ return tx
+}
+
+func (t *batchTxBuffered) Unlock() {
+ if t.pending != 0 {
+ t.backend.readTx.Lock() // blocks txReadBuffer for writing.
+ t.buf.writeback(&t.backend.readTx.buf)
+ t.backend.readTx.Unlock()
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ }
+ t.batchTx.Unlock()
+}
+
+func (t *batchTxBuffered) Commit() {
+ t.Lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) CommitAndStop() {
+ t.Lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) commit(stop bool) {
+ // all read txs must be closed to acquire boltdb commit rwlock
+ t.backend.readTx.Lock()
+ t.unsafeCommit(stop)
+ t.backend.readTx.Unlock()
+}
+
+func (t *batchTxBuffered) unsafeCommit(stop bool) {
+ if t.backend.readTx.tx != nil {
+ // wait for all store read transactions using the current boltdb tx to finish,
+ // then close the boltdb tx
+ go func(tx *bolt.Tx, wg *sync.WaitGroup) {
+ wg.Wait()
+ if err := tx.Rollback(); err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal("failed to rollback tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot rollback tx (%s)", err)
+ }
+ }
+ }(t.backend.readTx.tx, t.backend.readTx.txWg)
+ t.backend.readTx.reset()
+ }
+
+ t.batchTx.commit(stop)
+
+ if !stop {
+ t.backend.readTx.tx = t.backend.begin(false)
+ }
+}
+
+func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
+ t.batchTx.UnsafePut(bucketName, key, value)
+ t.buf.put(bucketName, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
+ t.batchTx.UnsafeSeqPut(bucketName, key, value)
+ t.buf.putSeq(bucketName, key, value)
+}
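
A subtlety worth noting from batchTxBuffered above: Unlock writes pending updates back into the read buffer, so a put becomes visible to readers as soon as the write lock is released, before the batch ever commits to boltdb. A minimal sketch of that behavior (illustrative, not part of this change):

    package backend_test

    import (
        "fmt"
        "time"

        "go.etcd.io/etcd/mvcc/backend"
    )

    func ExampleBatchTx_readYourWrites() {
        // A huge batch interval and limit so nothing commits on its own.
        b, _ := backend.NewTmpBackend(time.Hour, 1<<30)
        defer b.Close()

        tx := b.BatchTx()
        tx.Lock()
        tx.UnsafeCreateBucket([]byte("key"))
        tx.UnsafeSeqPut([]byte("key"), []byte("k1"), []byte("v1"))
        tx.Unlock() // write-back into the read buffer happens here

        // No ForceCommit: the value is served from the read buffer.
        rt := b.ReadTx()
        rt.RLock()
        _, vals := rt.UnsafeRange([]byte("key"), []byte("k1"), nil, 0)
        rt.RUnlock()
        fmt.Println(string(vals[0])) // "v1"
    }
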
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go
new file mode 100644
index 0000000..f15f030
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!windows
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+var boltOpenOptions *bolt.Options
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go
new file mode 100644
index 0000000..f712671
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "syscall"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead,
+// which can speed up entire-database reads with boltdb. We want to
+// enable MAP_POPULATE for faster key-value store recovery in the storage
+// package. If your kernel version is lower than 2.6.23
+// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might
+// silently ignore this flag. Please update your kernel to prevent this.
+var boltOpenOptions = &bolt.Options{
+ MmapFlags: syscall.MAP_POPULATE,
+ NoFreelistSync: true,
+}
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go
new file mode 100644
index 0000000..c650059
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go
@@ -0,0 +1,26 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+var boltOpenOptions *bolt.Options
+
+// setting mmap size != 0 on windows will allocate the entire
+// mmap size for the file, instead of growing it. So, force 0.
+
+func (bcfg *BackendConfig) mmapSize() int { return 0 }
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/doc.go b/vendor/go.etcd.io/etcd/mvcc/backend/doc.go
new file mode 100644
index 0000000..9cc42fa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package backend defines a standard interface for etcd's backend MVCC storage.
+package backend
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go b/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go
new file mode 100644
index 0000000..d9641af
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go
@@ -0,0 +1,95 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_commit_duration_seconds",
+ Help: "The latency distributions of commit called by backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_rebalance_duration_seconds",
+ Help: "The latency distributions of commit.rebalance called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_spill_duration_seconds",
+ Help: "The latency distributions of commit.spill called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_write_duration_seconds",
+ Help: "The latency distributions of commit.write called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_defrag_duration_seconds",
+ Help: "The latency distribution of backend defragmentation.",
+
+ // defragmenting 100 MB usually takes ~1 sec, so the lowest bucket of 100 ms corresponds to ~10 MB
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
+ Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
+ })
+
+ snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_snapshot_duration_seconds",
+ Help: "The latency distribution of backend snapshots.",
+
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
+ })
+)
+
+func init() {
+ prometheus.MustRegister(commitSec)
+ prometheus.MustRegister(rebalanceSec)
+ prometheus.MustRegister(spillSec)
+ prometheus.MustRegister(writeSec)
+ prometheus.MustRegister(defragSec)
+ prometheus.MustRegister(snapshotTransferSec)
+}
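
These collectors register themselves against the default Prometheus registry in init(), so any process that imports this package exposes them wherever it serves the default handler. A minimal, hedged sketch of wiring that up (assumes the standard promhttp handler; not part of this change):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus/promhttp"
        _ "go.etcd.io/etcd/mvcc/backend" // init() registers the histograms
    )

    func main() {
        // etcd_disk_backend_commit_duration_seconds and friends are
        // served from the default registry.
        http.Handle("/metrics", promhttp.Handler())
        http.ListenAndServe(":2112", nil)
    }
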
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go b/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go
new file mode 100644
index 0000000..91fe72e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go
@@ -0,0 +1,210 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "math"
+ "sync"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
+// is known to never overwrite any key so range is safe.
+var safeRangeBucket = []byte("key")
+
+type ReadTx interface {
+ Lock()
+ Unlock()
+ RLock()
+ RUnlock()
+
+ UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
+ UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
+}
+
+type readTx struct {
+ // mu protects accesses to the txReadBuffer
+ mu sync.RWMutex
+ buf txReadBuffer
+
+ // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle.
+ // txMu protects accesses to buckets and tx on Range requests.
+ txMu sync.RWMutex
+ tx *bolt.Tx
+ buckets map[string]*bolt.Bucket
+ // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
+ txWg *sync.WaitGroup
+}
+
+func (rt *readTx) Lock() { rt.mu.Lock() }
+func (rt *readTx) Unlock() { rt.mu.Unlock() }
+func (rt *readTx) RLock() { rt.mu.RLock() }
+func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
+
+func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
+ panic("do not use unsafeRange on non-keys bucket")
+ }
+ keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+ // find/cache bucket
+ bn := string(bucketName)
+ rt.txMu.RLock()
+ bucket, ok := rt.buckets[bn]
+ rt.txMu.RUnlock()
+ if !ok {
+ rt.txMu.Lock()
+ bucket = rt.tx.Bucket(bucketName)
+ rt.buckets[bn] = bucket
+ rt.txMu.Unlock()
+ }
+
+ // ignore missing bucket since it may have been created in this batch
+ if bucket == nil {
+ return keys, vals
+ }
+ rt.txMu.Lock()
+ c := bucket.Cursor()
+ rt.txMu.Unlock()
+
+ k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+ return append(k2, keys...), append(v2, vals...)
+}
+
+func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := rt.buf.ForEach(bucketName, getDups); err != nil {
+ return err
+ }
+ rt.txMu.Lock()
+ err := unsafeForEach(rt.tx, bucketName, visitNoDup)
+ rt.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return rt.buf.ForEach(bucketName, visitor)
+}
+
+func (rt *readTx) reset() {
+ rt.buf.reset()
+ rt.buckets = make(map[string]*bolt.Bucket)
+ rt.tx = nil
+ rt.txWg = new(sync.WaitGroup)
+}
+
+// TODO: create a base type for readTx and concurrentReadTx to avoid duplicated function implementation?
+type concurrentReadTx struct {
+ buf txReadBuffer
+ txMu *sync.RWMutex
+ tx *bolt.Tx
+ buckets map[string]*bolt.Bucket
+ txWg *sync.WaitGroup
+}
+
+func (rt *concurrentReadTx) Lock() {}
+func (rt *concurrentReadTx) Unlock() {}
+
+// RLock is a no-op. concurrentReadTx does not need to be locked after it is created.
+func (rt *concurrentReadTx) RLock() {}
+
+// RUnlock signals the end of concurrentReadTx.
+func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() }
+
+func (rt *concurrentReadTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := rt.buf.ForEach(bucketName, getDups); err != nil {
+ return err
+ }
+ rt.txMu.Lock()
+ err := unsafeForEach(rt.tx, bucketName, visitNoDup)
+ rt.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return rt.buf.ForEach(bucketName, visitor)
+}
+
+func (rt *concurrentReadTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
+ panic("do not use unsafeRange on non-keys bucket")
+ }
+ keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+ // find/cache bucket
+ bn := string(bucketName)
+ rt.txMu.RLock()
+ bucket, ok := rt.buckets[bn]
+ rt.txMu.RUnlock()
+ if !ok {
+ rt.txMu.Lock()
+ bucket = rt.tx.Bucket(bucketName)
+ rt.buckets[bn] = bucket
+ rt.txMu.Unlock()
+ }
+
+ // ignore missing bucket since it may have been created in this batch
+ if bucket == nil {
+ return keys, vals
+ }
+ rt.txMu.Lock()
+ c := bucket.Cursor()
+ rt.txMu.Unlock()
+
+ k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+ return append(k2, keys...), append(v2, vals...)
+}
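
The split between readTx and concurrentReadTx above matters to callers: a concurrentReadTx pins the boltdb read tx of the current batch interval, and its RUnlock is what allows that tx to be rolled back at the end of the interval. A usage sketch (illustrative, not part of this change):

    package backend_test

    import (
        "time"

        "go.etcd.io/etcd/mvcc/backend"
    )

    func demoConcurrentRead() {
        b, _ := backend.NewTmpBackend(100*time.Millisecond, 10000)
        defer b.Close()

        // ConcurrentReadTx copies the read buffer up front, so it does
        // not contend with writers on the buffer lock afterwards.
        rt := b.ConcurrentReadTx()
        rt.RLock() // no-op, see concurrentReadTx.RLock above

        _, _ = rt.UnsafeRange([]byte("key"), []byte("a"), []byte("z"), 0)

        // RUnlock decrements txWg. It must be called exactly once, or
        // the underlying boltdb read tx is never rolled back.
        rt.RUnlock()
    }
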
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go b/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go
new file mode 100644
index 0000000..d734638
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go
@@ -0,0 +1,203 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "sort"
+)
+
+// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
+type txBuffer struct {
+ buckets map[string]*bucketBuffer
+}
+
+func (txb *txBuffer) reset() {
+ for k, v := range txb.buckets {
+ if v.used == 0 {
+ // demote
+ delete(txb.buckets, k)
+ }
+ v.used = 0
+ }
+}
+
+// txWriteBuffer buffers writes of pending updates that have not yet committed.
+type txWriteBuffer struct {
+ txBuffer
+ seq bool
+}
+
+func (txw *txWriteBuffer) put(bucket, k, v []byte) {
+ txw.seq = false
+ txw.putSeq(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
+ b, ok := txw.buckets[string(bucket)]
+ if !ok {
+ b = newBucketBuffer()
+ txw.buckets[string(bucket)] = b
+ }
+ b.add(k, v)
+}
+
+func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
+ for k, wb := range txw.buckets {
+ rb, ok := txr.buckets[k]
+ if !ok {
+ delete(txw.buckets, k)
+ txr.buckets[k] = wb
+ continue
+ }
+ if !txw.seq && wb.used > 1 {
+ // assume no duplicate keys
+ sort.Sort(wb)
+ }
+ rb.merge(wb)
+ }
+ txw.reset()
+}
+
+// txReadBuffer accesses buffered updates.
+type txReadBuffer struct{ txBuffer }
+
+func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if b := txr.buckets[string(bucketName)]; b != nil {
+ return b.Range(key, endKey, limit)
+ }
+ return nil, nil
+}
+
+func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ if b := txr.buckets[string(bucketName)]; b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// unsafeCopy returns a copy of txReadBuffer; the caller should hold backend.readTx.RLock().
+func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
+ txrCopy := txReadBuffer{
+ txBuffer: txBuffer{
+ buckets: make(map[string]*bucketBuffer, len(txr.txBuffer.buckets)),
+ },
+ }
+ for bucketName, bucket := range txr.txBuffer.buckets {
+ txrCopy.txBuffer.buckets[bucketName] = bucket.Copy()
+ }
+ return txrCopy
+}
+
+type kv struct {
+ key []byte
+ val []byte
+}
+
+// bucketBuffer buffers key-value pairs that are pending commit.
+type bucketBuffer struct {
+ buf []kv
+ // used tracks number of elements in use so buf can be reused without reallocation.
+ used int
+}
+
+func newBucketBuffer() *bucketBuffer {
+ return &bucketBuffer{buf: make([]kv, 512), used: 0}
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+ f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
+ idx := sort.Search(bb.used, f)
+ if idx < 0 {
+ return nil, nil
+ }
+ if len(endKey) == 0 {
+ if bytes.Equal(key, bb.buf[idx].key) {
+ keys = append(keys, bb.buf[idx].key)
+ vals = append(vals, bb.buf[idx].val)
+ }
+ return keys, vals
+ }
+ if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+ return nil, nil
+ }
+ for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+ if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+ break
+ }
+ keys = append(keys, bb.buf[i].key)
+ vals = append(vals, bb.buf[i].val)
+ }
+ return keys, vals
+}
+
+func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
+ for i := 0; i < bb.used; i++ {
+ if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (bb *bucketBuffer) add(k, v []byte) {
+ bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+ bb.used++
+ if bb.used == len(bb.buf) {
+ buf := make([]kv, (3*len(bb.buf))/2)
+ copy(buf, bb.buf)
+ bb.buf = buf
+ }
+}
+
+// merge merges data from bbsrc into bb.
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
+ for i := 0; i < bbsrc.used; i++ {
+ bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
+ }
+ if bb.used == bbsrc.used {
+ return
+ }
+ if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
+ return
+ }
+
+ sort.Stable(bb)
+
+ // remove duplicates, using only newest update
+ widx := 0
+ for ridx := 1; ridx < bb.used; ridx++ {
+ if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
+ widx++
+ }
+ bb.buf[widx] = bb.buf[ridx]
+ }
+ bb.used = widx + 1
+}
+
+func (bb *bucketBuffer) Len() int { return bb.used }
+func (bb *bucketBuffer) Less(i, j int) bool {
+ return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
+}
+func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
+
+func (bb *bucketBuffer) Copy() *bucketBuffer {
+ bbCopy := bucketBuffer{
+ buf: make([]kv, len(bb.buf)),
+ used: bb.used,
+ }
+ copy(bbCopy.buf, bb.buf)
+ return &bbCopy
+}
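
Since bucketBuffer and merge are unexported, the dedup pass at the end of merge is easiest to exercise from an in-package test. A hypothetical sketch (assumes an in-package _test.go file; not part of this change) showing that the newest update wins for a duplicated key:

    package backend

    import (
        "bytes"
        "testing"
    )

    func TestBucketBufferMergeDedup(t *testing.T) {
        dst, src := newBucketBuffer(), newBucketBuffer()
        dst.add([]byte("a"), []byte("old"))
        src.add([]byte("a"), []byte("new")) // same key, newer value
        src.add([]byte("b"), []byte("v"))

        dst.merge(src)

        // Duplicates are collapsed and the later write wins, because
        // sort.Stable keeps insertion order among equal keys and the
        // dedup loop overwrites earlier entries with later ones.
        if dst.used != 2 {
            t.Fatalf("expected 2 entries after dedup, got %d", dst.used)
        }
        if !bytes.Equal(dst.buf[0].val, []byte("new")) {
            t.Fatalf("expected newest value for key a, got %q", dst.buf[0].val)
        }
    }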