khenaidoo | ffe076b | 2019-01-15 16:08:08 -0500 | [diff] [blame^] | 1 | // Copyright 2016 The etcd Authors |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | // Package cache exports functionality for efficiently caching and mapping |
| 16 | // `RangeRequest`s to corresponding `RangeResponse`s. |
| 17 | package cache |
| 18 | |
| 19 | import ( |
| 20 | "errors" |
| 21 | "sync" |
| 22 | |
| 23 | "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" |
| 24 | pb "github.com/coreos/etcd/etcdserver/etcdserverpb" |
| 25 | "github.com/coreos/etcd/pkg/adt" |
| 26 | "github.com/golang/groupcache/lru" |
| 27 | ) |
| 28 | |
var (
	// DefaultMaxEntries is the default maximum number of cached entries
	// (presumably used by callers of NewCache; not referenced in this file).
	DefaultMaxEntries = 2048
	// ErrCompacted is returned by Get when the requested revision has
	// already been compacted away.
	ErrCompacted = rpctypes.ErrGRPCCompacted
)
| 33 | |
// Cache caches responses of range requests and supports invalidation,
// both by key range and by compaction revision.
type Cache interface {
	// Add caches the response for the given request.
	Add(req *pb.RangeRequest, resp *pb.RangeResponse)
	// Get looks up the cached response for the given request.
	Get(req *pb.RangeRequest) (*pb.RangeResponse, error)
	// Compact marks cached responses at revisions older than 'revision'
	// as stale; they are evicted lazily on access.
	Compact(revision int64)
	// Invalidate evicts cached responses whose ranges intersect the
	// range from key to endkey.
	Invalidate(key []byte, endkey []byte)
	// Size returns the number of cached responses.
	Size() int
	// Close releases any resources held by the cache.
	Close()
}
| 42 | |
| 43 | // keyFunc returns the key of a request, which is used to look up its caching response in the cache. |
| 44 | func keyFunc(req *pb.RangeRequest) string { |
| 45 | // TODO: use marshalTo to reduce allocation |
| 46 | b, err := req.Marshal() |
| 47 | if err != nil { |
| 48 | panic(err) |
| 49 | } |
| 50 | return string(b) |
| 51 | } |
| 52 | |
| 53 | func NewCache(maxCacheEntries int) Cache { |
| 54 | return &cache{ |
| 55 | lru: lru.New(maxCacheEntries), |
| 56 | compactedRev: -1, |
| 57 | } |
| 58 | } |
| 59 | |
// Close is a no-op; the cache holds no resources that need releasing.
func (c *cache) Close() {}
| 61 | |
// cache implements Cache with an LRU of marshaled-request -> response
// entries, plus an interval tree that maps key ranges back to the cache
// keys covering them.
type cache struct {
	mu  sync.RWMutex
	lru *lru.Cache

	// a reverse index for cache invalidation: each interval's value is a
	// set (map[string]struct{}) of cache keys whose requests cover it.
	cachedRanges adt.IntervalTree

	// compactedRev is the largest revision passed to Compact; entries at
	// revisions below it are stale and evicted lazily by Get.
	compactedRev int64
}
| 72 | |
| 73 | // Add adds the response of a request to the cache if its revision is larger than the compacted revision of the cache. |
| 74 | func (c *cache) Add(req *pb.RangeRequest, resp *pb.RangeResponse) { |
| 75 | key := keyFunc(req) |
| 76 | |
| 77 | c.mu.Lock() |
| 78 | defer c.mu.Unlock() |
| 79 | |
| 80 | if req.Revision > c.compactedRev { |
| 81 | c.lru.Add(key, resp) |
| 82 | } |
| 83 | // we do not need to invalidate a request with a revision specified. |
| 84 | // so we do not need to add it into the reverse index. |
| 85 | if req.Revision != 0 { |
| 86 | return |
| 87 | } |
| 88 | |
| 89 | var ( |
| 90 | iv *adt.IntervalValue |
| 91 | ivl adt.Interval |
| 92 | ) |
| 93 | if len(req.RangeEnd) != 0 { |
| 94 | ivl = adt.NewStringAffineInterval(string(req.Key), string(req.RangeEnd)) |
| 95 | } else { |
| 96 | ivl = adt.NewStringAffinePoint(string(req.Key)) |
| 97 | } |
| 98 | |
| 99 | iv = c.cachedRanges.Find(ivl) |
| 100 | |
| 101 | if iv == nil { |
| 102 | val := map[string]struct{}{key: {}} |
| 103 | c.cachedRanges.Insert(ivl, val) |
| 104 | } else { |
| 105 | val := iv.Val.(map[string]struct{}) |
| 106 | val[key] = struct{}{} |
| 107 | iv.Val = val |
| 108 | } |
| 109 | } |
| 110 | |
| 111 | // Get looks up the caching response for a given request. |
| 112 | // Get is also responsible for lazy eviction when accessing compacted entries. |
| 113 | func (c *cache) Get(req *pb.RangeRequest) (*pb.RangeResponse, error) { |
| 114 | key := keyFunc(req) |
| 115 | |
| 116 | c.mu.Lock() |
| 117 | defer c.mu.Unlock() |
| 118 | |
| 119 | if req.Revision > 0 && req.Revision < c.compactedRev { |
| 120 | c.lru.Remove(key) |
| 121 | return nil, ErrCompacted |
| 122 | } |
| 123 | |
| 124 | if resp, ok := c.lru.Get(key); ok { |
| 125 | return resp.(*pb.RangeResponse), nil |
| 126 | } |
| 127 | return nil, errors.New("not exist") |
| 128 | } |
| 129 | |
| 130 | // Invalidate invalidates the cache entries that intersecting with the given range from key to endkey. |
| 131 | func (c *cache) Invalidate(key, endkey []byte) { |
| 132 | c.mu.Lock() |
| 133 | defer c.mu.Unlock() |
| 134 | |
| 135 | var ( |
| 136 | ivs []*adt.IntervalValue |
| 137 | ivl adt.Interval |
| 138 | ) |
| 139 | if len(endkey) == 0 { |
| 140 | ivl = adt.NewStringAffinePoint(string(key)) |
| 141 | } else { |
| 142 | ivl = adt.NewStringAffineInterval(string(key), string(endkey)) |
| 143 | } |
| 144 | |
| 145 | ivs = c.cachedRanges.Stab(ivl) |
| 146 | for _, iv := range ivs { |
| 147 | keys := iv.Val.(map[string]struct{}) |
| 148 | for key := range keys { |
| 149 | c.lru.Remove(key) |
| 150 | } |
| 151 | } |
| 152 | // delete after removing all keys since it is destructive to 'ivs' |
| 153 | c.cachedRanges.Delete(ivl) |
| 154 | } |
| 155 | |
| 156 | // Compact invalidate all caching response before the given rev. |
| 157 | // Replace with the invalidation is lazy. The actual removal happens when the entries is accessed. |
| 158 | func (c *cache) Compact(revision int64) { |
| 159 | c.mu.Lock() |
| 160 | defer c.mu.Unlock() |
| 161 | |
| 162 | if revision > c.compactedRev { |
| 163 | c.compactedRev = revision |
| 164 | } |
| 165 | } |
| 166 | |
| 167 | func (c *cache) Size() int { |
| 168 | c.mu.RLock() |
| 169 | defer c.mu.RUnlock() |
| 170 | return c.lru.Len() |
| 171 | } |