VOL-4925 - Build and release components.
Misc/
o Bulk copyright notice update to 2023.
go.mod
go.sum
------
o Bump component version strings to the latest release.
o Cosmetic edit to force a build.
Change-Id: Icc8869463d1f1a4451938466c39fcc3d11ebad73
diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
new file mode 100644
index 0000000..b975a7b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.gitignore
@@ -0,0 +1,3 @@
+*.rdb
+testdata/*/
+.idea/
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
new file mode 100644
index 0000000..1e8d238
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -0,0 +1,21 @@
+run:
+ concurrency: 8
+ deadline: 5m
+ tests: false
+linters:
+ enable-all: true
+ disable:
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - goconst
+ - godox
+ - gosec
+ - maligned
+ - wsl
+ - gomnd
+ - goerr113
+ - exhaustive
+ - nestif
+ - nlreturn
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc
new file mode 100644
index 0000000..8b7f044
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.prettierrc
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml
new file mode 100644
index 0000000..1bf578d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.travis.yml
@@ -0,0 +1,20 @@
+dist: xenial
+language: go
+
+services:
+ - redis-server
+
+go:
+ - 1.14.x
+ - 1.15.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+go_import_path: github.com/go-redis/redis
+
+before_install:
+ - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
+ -b $(go env GOPATH)/bin v1.31.0
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
new file mode 100644
index 0000000..8392d54
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Changelog
+
+> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+
+See https://redis.uptrace.dev/changelog/
diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE
new file mode 100644
index 0000000..298bed9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
new file mode 100644
index 0000000..49e4c96
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -0,0 +1,21 @@
+all: testdeps
+ go test ./...
+ go test ./... -short -race
+ go test ./... -run=NONE -bench=. -benchmem
+ env GOOS=linux GOARCH=386 go test ./...
+ go vet
+ golangci-lint run
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
new file mode 100644
index 0000000..da5d0fb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -0,0 +1,137 @@
+# Redis client for Golang
+
+[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+
+- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+- [Documentation](https://redis.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
+- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+
+## Ecosystem
+
+- [Distributed Locks](https://github.com/bsm/redislock).
+- [Redis Cache](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/redis_rate).
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling with
+ [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
+- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
+ [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
+- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
+- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
+- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
+ without using cluster mode and Redis Sentinel.
+- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
+
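+For example, the Redis Cluster support listed above is exposed through `NewClusterClient`. A
+minimal sketch (the addresses are placeholders for your own cluster nodes):
+
+```go
+rdb := redis.NewClusterClient(&redis.ClusterOptions{
+ Addrs: []string{":7000", ":7001", ":7002"},
+})
+```
+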
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install go-redis (note _v8_ in the import path; omitting it is a common mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
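+
+Every command takes a `context.Context` as its first argument, so the usual deadline and
+cancellation plumbing applies. A minimal sketch (the timeout value is arbitrary):
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+defer cancel()
+
+// Get fails with a context error if Redis does not answer in time.
+val, err := rdb.Get(ctx, "key").Result()
+```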
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
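+
+Commands can also be batched with a pipeline to save round trips. A short sketch using
+`Pipelined` (the key name is illustrative):
+
+```go
+cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
+ pipe.Incr(ctx, "pipelined_counter")
+ pipe.Expire(ctx, "pipelined_counter", time.Hour)
+ return nil
+})
+// cmds holds the queued command results; err is the first error, if any.
+```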
+
+## See also
+
+- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
+- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
+- [Golang msgpack](https://github.com/vmihailenco/msgpack)
+- [Golang message task queue](https://github.com/vmihailenco/taskq)
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
new file mode 100644
index 0000000..a6ce5c5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -0,0 +1,1697 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // NewClient creates a cluster node client with provided name and options.
+ NewClient func(opt *Options) *Client
+
+ // The maximum number of retries before giving up. A command is retried
+ // on network errors and on MOVED/ASK redirects.
+ // The default is 3 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful for manually creating a cluster of standalone Redis servers
+ // and load-balancing read/write operations between masters and slaves.
+ // It can use a service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+ // The following options are copied from the Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolSize applies per cluster node and not for the whole cluster.
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 3
+ }
+
+ if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.NumCPU()
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ const disableIdleCheck = -1
+
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: disableIdleCheck,
+
+ readOnly: opt.ReadOnly,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+ const numProbe = 10
+ var dur uint64
+
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+ start := time.Now()
+ n.Client.Ping(context.TODO())
+ dur += uint64(time.Since(start) / time.Microsecond)
+ }
+
+ latency := float64(dur) / float64(numProbe)
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.nodes = nil
+ c.activeAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+ c.mu.RLock()
+ closed := c.closed
+ if !closed {
+ if len(c.activeAddrs) > 0 {
+ addrs = c.activeAddrs
+ } else {
+ addrs = c.addrs
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+
+ c.mu.Lock()
+
+ c.activeAddrs = c.activeAddrs[:0]
+ for addr, node := range c.nodes {
+ if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency {
+ go node.updateLatency()
+ }
+ continue
+ }
+
+ delete(c.nodes, addr)
+ collected = append(collected, node)
+ }
+
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ node = newClusterNode(c.opt, addr)
+
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
+
+ return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.Get(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use origin host which is not loopback and node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Failing() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Failing() {
+ continue
+ }
+ if node == nil || n.Latency() < node.Latency() {
+ node = n
+ }
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ // If all nodes are failing - return random node
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ n := rand.Intn(len(nodes))
+ return nodes[n], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func(ctx context.Context) (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload(ctx context.Context) {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload(ctx)
+ if err != nil {
+ return
+ }
+ time.Sleep(200 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+ v := c.state.Load()
+ if v != nil {
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload(ctx)
+ }
+ return state, nil
+ }
+ return c.Reload(ctx)
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
+ if err == nil {
+ return state, nil
+ }
+ return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder //nolint:structcheck
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ *clusterClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ clusterClient: &clusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ },
+ ctx: context.Background(),
+ }
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ if opt.IdleCheckFrequency > 0 {
+ go c.reaper(opt.IdleCheckFrequency)
+ }
+
+ return c
+}
+
+func (c *ClusterClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload(ctx)
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ slot := c.cmdSlot(cmd)
+
+ var node *clusterNode
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(ctx, cmdInfo, slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
+ _ = pipe.Close()
+ ask = false
+ } else {
+ lastErr = node.Client.Process(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload(ctx)
+ }
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ var err error
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ // First retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+ // Second try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get(context.TODO())
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots(ctx).Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ /*
+ * No node is reachable. It's possible that the IPs of all nodes have changed.
+ * Clear activeAddrs so the client can reconnect using the initial address list
+ * (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), which has a chance to
+ * re-resolve the domain names and pick up updated IP addresses.
+ */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
+ return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+ ticker := time.NewTicker(idleCheckFrequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ break
+ }
+
+ for _, node := range nodes {
+ _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
+ }
+ }
+ }
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+ err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) _processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly && isLoadingError(err) {
+ node.MarkAsFailing()
+ return err
+ }
+ if isRedisError(err) {
+ continue
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload(ctx)
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) _processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
+ if err != nil {
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
+ }
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload(ctx)
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.Watch(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload(ctx)
+ }
+ node, err = c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
+
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(ctx, slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ info, err := node.Client.Command(ctx).Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ panic("not reached")
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+ if err != nil {
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdInfo *CommandInfo,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if (c.opt.RouteByLatency || c.opt.RouteRandomly) && cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
new file mode 100644
index 0000000..1f0bae0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -0,0 +1,25 @@
+package redis
+
+import (
+ "context"
+ "sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ return cmd
+ }
+ cmd.val = size
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
new file mode 100644
index 0000000..5dd5533
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -0,0 +1,2396 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+type Cmder interface {
+ Name() string
+ FullName() string
+ Args() []interface{}
+ String() string
+ stringArg(int) string
+ firstKeyPos() int8
+ setFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+
+ if info != nil {
+ return int(info.FirstKeyPos)
+ }
+ return 0
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return internal.String(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ s, _ := cmd.args[pos].(string)
+ return s
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) setFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply(sliceParser)
+ return err
+}
+
+// sliceParser implements proto.MultiBulkParse.
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, n)
+ for i := 0; i < len(vals); i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err != nil {
+ if err == Nil {
+ vals[i] = nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ vals[i] = err
+ continue
+ }
+ return nil, err
+ }
+ vals[i] = v
+ }
+ return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(sliceParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadIntReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ num, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = num
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
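+
+// For illustration (assumed caller-side usage, assuming a *Client named rdb;
+// not part of the upstream file): the sentinel values above surface directly
+// to callers, e.g.
+//
+//	d, err := rdb.TTL(ctx, "key").Result()
+//	switch {
+//	case err != nil:
+//		// handle error
+//	case d == -2:
+//		// key does not exist
+//	case d == -1:
+//		// key exists but has no expiry
+//	default:
+//		// d is the remaining TTL at the command's precision
+//	}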
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val = time.Unix(sec, microsec*1000)
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(nil)
+ // `SET key value NX` returns nil when the key already exists, while
+ // `SETNX key value` returns a bool (0/1), so convert nil to false.
+ if err == Nil {
+ cmd.val = false
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ switch v := v.(type) {
+ case int64:
+ cmd.val = v == 1
+ return nil
+ case string:
+ cmd.val = v == "OK"
+ return nil
+ default:
+ return fmt.Errorf("got %T, wanted int64 or string", v)
+ }
+}
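+
+// For illustration (assumed usage, assuming a *Client named rdb; not part of
+// the upstream file): the nil-to-false conversion above means both spellings
+// yield a plain bool:
+//
+//	ok, err := rdb.SetNX(ctx, "key", "v", 0).Result()           // SETNX, integer reply
+//	ok, err = rdb.SetNX(ctx, "key", "v", time.Minute).Result()  // SET ... NX, nil or OK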
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
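+
+// For illustration (assumed usage, assuming a *Client named rdb; not part of
+// the upstream file): Scan can unmarshal a reply into any type supported by
+// proto.Scan, e.g.
+//
+//	var n int
+//	err := rdb.Get(ctx, "counter").Scan(&n)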
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloatReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
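+
+// For illustration (assumed usage, assuming a *Client named rdb; not part of
+// the upstream file):
+//
+//	var ids []int64
+//	err := rdb.LRange(ctx, "queue", 0, -1).ScanSlice(&ids)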
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = n == 1
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
+ return &StringIntMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = n
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+ var err error
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < n; i++ {
+ var err error
+ msgs[i], err = readXMessage(rd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return XMessage{}, err
+ }
+ if n != 2 {
+ return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ var values map[string]interface{}
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ } else {
+ values = v.(map[string]interface{})
+ }
+
+ return XMessage{
+ ID: id,
+ Values: values,
+ }, nil
+}
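+
+// For reference (a sketch of the wire format, not upstream text): each
+// message arrives as a two-element array of ID plus flat field/value pairs,
+// rendered by redis-cli as e.g.
+//
+//	1) 1) "1609459200000-0"
+//	   2) 1) "field1"
+//	      2) "value1"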
+
+// stringInterfaceMapParser implements proto.MultiBulkParse.
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]interface{}, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ stream, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, err := readXMessageSlice(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = XStream{
+ Stream: stream,
+ Messages: msgs,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
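+
+// For reference (a sketch of the wire format, not upstream text): XPending
+// mirrors the summary form of the XPENDING reply, e.g.
+//
+//	1) (integer) 10            -> Count
+//	2) "1609459200000-0"       -> Lower
+//	3) "1609459300000-0"       -> Higher
+//	4) 1) 1) "consumer-1"      -> Consumers
+//	      2) "5"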
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ count, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ lower, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ higher, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = &XPending{
+ Count: count,
+ Lower: lower,
+ Higher: higher,
+ }
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.val.Consumers == nil {
+ cmd.val.Consumers = make(map[string]int64)
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XPendingExt, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ idle, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ retryCount, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = append(cmd.val, XPendingExt{
+ ID: id,
+ Consumer: consumer,
+ Idle: time.Duration(idle) * time.Millisecond,
+ RetryCount: retryCount,
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = readXGroupInfo(rd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
+ var group XInfoGroup
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return group, err
+ }
+ if n != 8 {
+ return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
+ }
+
+ for i := 0; i < 4; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return group, err
+ }
+
+ val, err := rd.ReadString()
+ if err != nil {
+ return group, err
+ }
+
+ switch key {
+ case "name":
+ group.Name = val
+ case "consumers":
+ group.Consumers, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
+ case "pending":
+ group.Pending, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID = val
+ default:
+ return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
+ }
+ }
+
+ return group, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ FirstEntry XMessage
+ LastEntry XMessage
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(xStreamInfoParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.(*XInfoStream)
+ return nil
+}
+
+func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 14 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
+ "wanted 14", n)
+ }
+ var info XInfoStream
+ for i := 0; i < 7; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ switch key {
+ case "length":
+ info.Length, err = rd.ReadIntReply()
+ case "radix-tree-keys":
+ info.RadixTreeKeys, err = rd.ReadIntReply()
+ case "radix-tree-nodes":
+ info.RadixTreeNodes, err = rd.ReadIntReply()
+ case "groups":
+ info.Groups, err = rd.ReadIntReply()
+ case "last-generated-id":
+ info.LastGeneratedID, err = rd.ReadString()
+ case "first-entry":
+ info.FirstEntry, err = readXMessage(rd)
+ case "last-entry":
+ info.LastEntry, err = readXMessage(rd)
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &info, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]Z, n/2)
+ for i := 0; i < len(cmd.val); i++ {
+ member, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ score, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = Z{
+ Member: member,
+ Score: score,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 3 {
+ return nil, fmt.Errorf("got %d elements, expected 3", n)
+ }
+
+ cmd.val = &ZWithKey{}
+ var err error
+
+ cmd.val.Key, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.page, cmd.cursor, err = rd.ReadScanReply()
+ return err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
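+
+// For illustration (assumed usage, assuming a *Client named rdb; not part of
+// the upstream file):
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		fmt.Println(iter.Val())
+//	}
+//	if err := iter.Err(); err != nil {
+//		// handle error
+//	}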
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]ClusterSlot, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if n == 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].ID = id
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add a geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query a geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
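+
+// For illustration (an assumed expansion, not upstream code): a query such as
+//
+//	q := &GeoRadiusQuery{Radius: 200, Unit: "km", WithDist: true, Count: 10}
+//
+// appends the trailing arguments: 200 "km" "withdist" "count" 10.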
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if err != nil {
+ return err
+ }
+ cmd.locations = v.([]GeoLocation)
+ return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case string:
+ locs = append(locs, GeoLocation{
+ Name: vv,
+ })
+ case *GeoLocation:
+ // TODO: avoid copying
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]*GeoPos, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ longitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ latitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ cmd.val[vv.Name] = vv
+ }
+ return nil, nil
+ })
+ return err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+
+ switch n {
+ case numArgRedis5, numArgRedis6:
+ // continue
+ default:
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
+ }
+
+ var cmd CommandInfo
+ var err error
+
+ cmd.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.Flags = make([]string, n)
+ for i := 0; i < len(cmd.Flags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.Flags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.Flags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ if n == numArgRedis5 {
+ return &cmd, nil
+ }
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.ACLFlags = make([]string, n)
+ for i := 0; i < len(cmd.ACLFlags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.ACLFlags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.ACLFlags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These are optional fields, emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]SlowLog, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 4 {
+ err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
+ return nil, err
+ }
+
+ id, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ createdAt, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ createdAtTime := time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ costsDuration := time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if cmdLen < 1 {
+ err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ return nil, err
+ }
+
+ cmdString := make([]string, cmdLen)
+ for i := 0; i < cmdLen; i++ {
+ cmdString[i], err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var address, name string
+ for i := 4; i < n; i++ {
+ str, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if i == 4 {
+ address = str
+ } else if i == 5 {
+ name = str
+ }
+ }
+
+ cmd.val[i] = SlowLog{
+ ID: id,
+ Time: createdAtTime,
+ Duration: costsDuration,
+ Args: cmdString,
+ ClientAddr: address,
+ ClientName: name,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
new file mode 100644
index 0000000..79698ba
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -0,0 +1,2773 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+// KeepTTL is an option for the Set command to keep the key's existing TTL.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
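+
+// Note: the KEEPTTL option to SET requires Redis 6.0 or newer (a statement
+// based on the Redis documentation, not on this file).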
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
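+
+// For illustration (behavioral sketch, not upstream code):
+//
+//	formatMs(ctx, 500*time.Microsecond)  // == 1 (logged, then rounded up to 1ms)
+//	formatMs(ctx, 1500*time.Millisecond) // == 1500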
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ default:
+ return append(dst, arg)
+ }
+}
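+
+// For illustration (an assumed expansion, not upstream code): appendArgs lets
+// a single map or slice stand in for a flat argument list, e.g.
+//
+//	appendArgs(nil, []interface{}{map[string]interface{}{"k": "v"}})
+//	// -> []interface{}{"k", "v"}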
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *StringStringMapCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *SliceCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
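+
+// For illustration (assumed usage, not upstream code): every concrete client
+// in this package satisfies Cmdable, so commands can be issued through the
+// interface:
+//
+//	rdb := NewClient(&Options{Addr: "localhost:6379"})
+//	var c Cmdable = rdb
+//	err := c.Set(ctx, "key", "value", 0).Err()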
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the
+// users defined in the ACL list when connecting to a Redis 6.0 or greater
+// instance that uses the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
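+
+// Editor's note: the sketch below is illustrative only, not part of the
+// upstream library. It assumes a stateful connection (for example one obtained
+// from Client.Conn) and a hypothetical ACL user named "worker".
+func exampleAuthACL(ctx context.Context, conn StatefulCmdable) error {
+ // Authenticate the current connection as an ACL user (Redis 6.0+).
+ return conn.AuthACL(ctx, "worker", "s3cret").Err()
+}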
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(ctx context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
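+
+// Editor's note: an illustrative sketch, not upstream code, showing how the
+// Sort struct fields map to SORT options. Key names are hypothetical.
+func exampleSort(ctx context.Context, rdb *Client) ([]string, error) {
+ // SORT mylist LIMIT 0 5 GET weight_* DESC ALPHA
+ return rdb.Sort(ctx, "mylist", &Sort{
+ Offset: 0,
+ Count: 5,
+ Get: []string{"weight_*"},
+ Order: "DESC",
+ Alpha: true,
+ }).Result()
+}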
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args(key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
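+
+// Editor's note: an illustrative sketch, not upstream code, showing the three
+// argument forms MSet accepts; all three produce the same MSET command.
+func exampleMSet(ctx context.Context, rdb *Client) error {
+ if err := rdb.MSet(ctx, "key1", "value1", "key2", "value2").Err(); err != nil {
+ return err
+ }
+ if err := rdb.MSet(ctx, []string{"key1", "value1", "key2", "value2"}).Err(); err != nil {
+ return err
+ }
+ // The map form does not guarantee the order of pairs on the wire.
+ return rdb.MSet(ctx, map[string]interface{}{"key1": "value1", "key2": "value2"}).Err()
+}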
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+// Use expiration for `SETEX`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep the existing TTL.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
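+
+// Editor's note: an illustrative sketch, not upstream code, showing how the
+// expiration argument selects between EX, PX, and KEEPTTL.
+func exampleSet(ctx context.Context, rdb *Client) {
+ rdb.Set(ctx, "k", "v", 0) // no expiration
+ rdb.Set(ctx, "k", "v", 10*time.Second) // SET k v ex 10
+ rdb.Set(ctx, "k", "v", 150*time.Millisecond) // SET k v px 150
+ rdb.Set(ctx, "k", "v", KeepTTL) // SET k v keepttl (Redis 6.0+)
+}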
+
+// Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep the existing TTL.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
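+
+// Editor's note: an illustrative sketch, not upstream code. With a non-zero
+// expiration a single SET ... NX round trip is sent, so the boolean result
+// reports whether this caller created the (hypothetical) lock key.
+func exampleSetNX(ctx context.Context, rdb *Client) (bool, error) {
+ // Acquire a lock-like key that expires after 30 seconds.
+ return rdb.SetNX(ctx, "lock:job42", "owner-1", 30*time.Second).Result()
+}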
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep the existing TTL.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
+ a := make([]interface{}, 0, 2+len(args))
+ a = append(a, "bitfield")
+ a = append(a, key)
+ a = append(a, args...)
+ cmd := NewIntSliceCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
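+
+// Editor's note: an illustrative sketch, not upstream code. BitField forwards
+// its variadic arguments verbatim to the BITFIELD command.
+func exampleBitField(ctx context.Context, rdb *Client) ([]int64, error) {
+ // BITFIELD bits SET u8 0 255 GET u8 0
+ return rdb.BitField(ctx, "bits", "SET", "u8", 0, 255, "GET", "u8", 0).Result()
+}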
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
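+
+// Editor's note: an illustrative sketch, not upstream code, showing the cursor
+// loop SCAN is meant to be driven with: repeat until the cursor returns to 0.
+func exampleScan(ctx context.Context, rdb *Client) ([]string, error) {
+ var all []string
+ var cursor uint64
+ for {
+ keys, next, err := rdb.Scan(ctx, cursor, "user:*", 100).Result()
+ if err != nil {
+ return nil, err
+ }
+ all = append(all, keys...)
+ if next == 0 {
+ return all, nil
+ }
+ cursor = next
+ }
+}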
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns interface{} values so that an empty string can be distinguished from a nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HSet accepts values in following formats:
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that it requires Redis v4 or newer to support multiple field/value pairs.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
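+
+// Editor's note: an illustrative sketch, not upstream code; each call below
+// sets the same two hash fields using one of the accepted argument forms.
+func exampleHSet(ctx context.Context, rdb *Client) error {
+ if err := rdb.HSet(ctx, "myhash", "key1", "value1", "key2", "value2").Err(); err != nil {
+ return err
+ }
+ if err := rdb.HSet(ctx, "myhash", []string{"key1", "value1", "key2", "value2"}).Err(); err != nil {
+ return err
+ }
+ return rdb.HSet(ctx, "myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}).Err()
+}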
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of its key-value pairs.
+type XAddArgs struct {
+ Stream string
+ MaxLen int64 // MAXLEN N
+ MaxLenApprox int64 // MAXLEN ~ N
+ ID string
+ Values interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xadd")
+ args = append(args, a.Stream)
+ if a.MaxLen > 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ } else if a.MaxLenApprox > 0 {
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
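+
+// Editor's note: an illustrative sketch, not upstream code. MaxLenApprox trims
+// with `MAXLEN ~ N` (cheaper than exact trimming), and leaving ID empty sends
+// `*` so the server auto-generates the entry ID.
+func exampleXAdd(ctx context.Context, rdb *Client) (string, error) {
+ return rdb.XAdd(ctx, &XAddArgs{
+ Stream: "events",
+ MaxLenApprox: 10000,
+ Values: map[string]interface{}{"type": "click", "user": "42"},
+ }).Result()
+}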
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 5+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.setFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
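+
+// Editor's note: an illustrative sketch, not upstream code. Streams holds all
+// stream names followed by the matching IDs; a negative Block disables
+// blocking (as XReadStreams does), while Block: 0 blocks indefinitely.
+func exampleXRead(ctx context.Context, rdb *Client) ([]XStream, error) {
+ // Wait up to one second for new entries on two streams.
+ return rdb.XRead(ctx, &XReadArgs{
+ Streams: []string{"events", "audit", "$", "$"},
+ Count: 10,
+ Block: time.Second,
+ }).Result()
+}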
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 8+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.setFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
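+
+// Editor's note: an illustrative sketch, not upstream code. The special ">" ID
+// requests entries never delivered to this group; processed entries are then
+// confirmed with XAck. Group and consumer names are hypothetical.
+func exampleXReadGroup(ctx context.Context, rdb *Client) error {
+ streams, err := rdb.XReadGroup(ctx, &XReadGroupArgs{
+ Group: "workers",
+ Consumer: "worker-1",
+ Streams: []string{"events", ">"},
+ Count: 10,
+ Block: time.Second,
+ }).Result()
+ if err != nil {
+ return err
+ }
+ for _, s := range streams {
+ for _, msg := range s.Messages {
+ // ... process msg.Values here ...
+ if err := rdb.XAck(ctx, "events", "workers", msg.ID).Err(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}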
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 5+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member together with the name of the key it was popped from.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewIntCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 2
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1] = "zadd", key
+ return c.zAdd(ctx, a, n, members...)
+}
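+
+// Editor's note: an illustrative sketch, not upstream code; the returned count
+// is the number of members that were newly added (not updated).
+func exampleZAdd(ctx context.Context, rdb *Client) (int64, error) {
+ return rdb.ZAdd(ctx, "leaderboard",
+ &Z{Score: 95.5, Member: "alice"},
+ &Z{Score: 87, Member: "bob"},
+ ).Result()
+}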
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "nx"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "xx"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewFloatCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 3
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2] = "zadd", key, "incr"
+ return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+ return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+ return c.zIncr(ctx, a, n, member)
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zinterstore"
+ args[1] = destination
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.setFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
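+
+// Editor's note: an illustrative sketch, not upstream code, showing how the
+// ZStore fields map to the WEIGHTS and AGGREGATE options.
+func exampleZInterStore(ctx context.Context, rdb *Client) (int64, error) {
+ // ZINTERSTORE dest 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE MAX
+ return rdb.ZInterStore(ctx, "dest", &ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []float64{2, 3},
+ Aggregate: "MAX",
+ }).Result()
+}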
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []interface{}{
+ "zrange",
+ key,
+ start,
+ stop,
+ }
+ if withScores {
+ args = append(args, "withscores")
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(ctx, key, start, stop, false)
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
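+
+// Editor's note: an illustrative sketch, not upstream code. Min and Max use
+// the Redis score-range syntax: "-inf"/"+inf", plain numbers, or a leading
+// "(" for an exclusive bound.
+func exampleZRangeByScore(ctx context.Context, rdb *Client) ([]string, error) {
+ return rdb.ZRangeByScore(ctx, "leaderboard", &ZRangeBy{
+ Min: "(80", // exclusive lower bound
+ Max: "+inf",
+ Offset: 0,
+ Count: 10,
+ }).Result()
+}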
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zunionstore"
+ args[1] = dest
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ cmd.setFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter is the new-style syntax, while ClientKill is the old one.
+//
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
+ args := []interface{}{"info"}
+ if len(section) > 0 {
+ args = append(args, section[0])
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(ctx, "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(ctx context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "eval"
+ cmdArgs[1] = script
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "evalsha"
+ cmdArgs[1] = sha1
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
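+// Example (sketch, assuming an existing *redis.Client named rdb): load a
+// script once with ScriptLoad, then invoke it by its SHA1 digest:
+//
+//	sha, err := rdb.ScriptLoad(ctx, "return redis.call('GET', KEYS[1])").Result()
+//	if err == nil {
+//		val, _ := rdb.EvalSha(ctx, sha, []string{"mykey"}).Result()
+//		_ = val
+//	}
+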
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewStringIntMapCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
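+// Example (sketch): the range helpers expand inclusive bounds into explicit
+// slot lists, so ClusterAddSlotsRange(ctx, 0, 2) issues CLUSTER ADDSLOTS 0 1 2.
+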
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
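+// Example (sketch): the *_RO variants above reject Store/StoreDist, while the
+// writing variants require one of them. Assuming rdb is a *redis.Client:
+//
+//	locs, err := rdb.GeoRadius(ctx, "points", 13.36, 38.11, &redis.GeoRadiusQuery{
+//		Radius: 200,
+//		Unit:   "km",
+//	}).Result()
+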
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go
new file mode 100644
index 0000000..5526253
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
new file mode 100644
index 0000000..9fe1376
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -0,0 +1,122 @@
+package redis
+
+import (
+ "context"
+ "io"
+ "net"
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+var ErrClosed = pool.ErrClosed
+
+type Error interface {
+ error
+
+ // RedisError is a no-op function but
+ // serves to distinguish types that are Redis
+ // errors from ordinary errors: a type is a
+ // Redis error if it has a RedisError method.
+ RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
+func shouldRetry(err error, retryTimeout bool) bool {
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return true
+ case nil, context.Canceled, context.DeadlineExceeded:
+ return false
+ }
+
+ if v, ok := err.(timeoutError); ok {
+ if v.Timeout() {
+ return retryTimeout
+ }
+ return true
+ }
+
+ s := err.Error()
+ if s == "ERR max number of clients reached" {
+ return true
+ }
+ if strings.HasPrefix(s, "LOADING ") {
+ return true
+ }
+ if strings.HasPrefix(s, "READONLY ") {
+ return true
+ }
+ if strings.HasPrefix(s, "CLUSTERDOWN ") {
+ return true
+ }
+ if strings.HasPrefix(s, "TRYAGAIN ") {
+ return true
+ }
+
+ return false
+}
+
+func isRedisError(err error) bool {
+ _, ok := err.(proto.RedisError)
+ return ok
+}
+
+func isBadConn(err error, allowTimeout bool) bool {
+ if err == nil {
+ return false
+ }
+
+ if isRedisError(err) {
+ // Close connections in read-only state in case a domain address is
+ // used and the domain resolves to a different Redis server. See #790.
+ return isReadOnlyError(err)
+ }
+
+ if allowTimeout {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return !netErr.Temporary()
+ }
+ }
+
+ return true
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+ if !isRedisError(err) {
+ return
+ }
+
+ s := err.Error()
+ switch {
+ case strings.HasPrefix(s, "MOVED "):
+ moved = true
+ case strings.HasPrefix(s, "ASK "):
+ ask = true
+ default:
+ return
+ }
+
+ ind := strings.LastIndex(s, " ")
+ if ind == -1 {
+ return false, false, ""
+ }
+ addr = s[ind+1:]
+ return
+}
+
+func isLoadingError(err error) bool {
+ return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+ return strings.HasPrefix(err.Error(), "READONLY ")
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+ Timeout() bool
+}
diff --git a/vendor/github.com/go-redis/redis/v8/go.mod b/vendor/github.com/go-redis/redis/v8/go.mod
new file mode 100644
index 0000000..7be1b7f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.mod
@@ -0,0 +1,11 @@
+module github.com/go-redis/redis/v8
+
+go 1.11
+
+require (
+ github.com/cespare/xxhash/v2 v2.1.1
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+ github.com/onsi/ginkgo v1.14.2
+ github.com/onsi/gomega v1.10.3
+ go.opentelemetry.io/otel v0.13.0
+)
diff --git a/vendor/github.com/go-redis/redis/v8/go.sum b/vendor/github.com/go-redis/redis/v8/go.sum
new file mode 100644
index 0000000..e1e8ab1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.sum
@@ -0,0 +1,82 @@
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA=
+go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go
new file mode 100644
index 0000000..b97fa0d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/arg.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+)
+
+func AppendArg(b []byte, v interface{}) []byte {
+ switch v := v.(type) {
+ case nil:
+ return append(b, "<nil>"...)
+ case string:
+ return appendUTF8String(b, Bytes(v))
+ case []byte:
+ return appendUTF8String(b, v)
+ case int:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int8:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int16:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int32:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int64:
+ return strconv.AppendInt(b, v, 10)
+ case uint:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint8:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint16:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint32:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint64:
+ return strconv.AppendUint(b, v, 10)
+ case float32:
+ return strconv.AppendFloat(b, float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.AppendFloat(b, v, 'f', -1, 64)
+ case bool:
+ if v {
+ return append(b, "true"...)
+ }
+ return append(b, "false"...)
+ case time.Time:
+ return v.AppendFormat(b, time.RFC3339Nano)
+ default:
+ return append(b, fmt.Sprint(v)...)
+ }
+}
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+ dst = append(dst, src...)
+ return dst
+}
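+
+// Example (sketch): AppendArg renders command arguments for logging, e.g.
+// AppendArg(nil, 42) yields []byte("42") and AppendArg(nil, true) yields
+// []byte("true").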
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
new file mode 100644
index 0000000..2fc74ad
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -0,0 +1,78 @@
+package hashtag
+
+import (
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+ if s := strings.IndexByte(key, '{'); s > -1 {
+ if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+ return key[s+1 : s+e+1]
+ }
+ }
+ return key
+}
+
+func RandomSlot() int {
+ return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+ if key == "" {
+ return RandomSlot()
+ }
+ key = Key(key)
+ return int(crc16sum(key)) % slotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+}
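+
+// Example (sketch): keys that share a {hash tag} land in the same slot, e.g.
+// Slot("{user1000}.following") == Slot("{user1000}.followers"), because only
+// the text between the first '{' and the following '}' is hashed.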
diff --git a/vendor/github.com/go-redis/redis/v8/internal/instruments.go b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
new file mode 100644
index 0000000..e837526
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
@@ -0,0 +1,33 @@
+package internal
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/api/global"
+ "go.opentelemetry.io/otel/api/metric"
+)
+
+var (
+ // WritesCounter is a count of write commands performed.
+ WritesCounter metric.Int64Counter
+ // NewConnectionsCounter is a count of new connections.
+ NewConnectionsCounter metric.Int64Counter
+)
+
+func init() {
+ defer func() {
+ if r := recover(); r != nil {
+ Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments: %v", r)
+ }
+ }()
+
+ meter := metric.Must(global.Meter("github.com/go-redis/redis"))
+
+ WritesCounter = meter.NewInt64Counter("redis.writes",
+ metric.WithDescription("the number of writes initiated"),
+ )
+
+ NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
+ metric.WithDescription("the number of connections created"),
+ )
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go
new file mode 100644
index 0000000..4a59c59
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/internal.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ panic("not reached")
+ }
+ if minBackoff == 0 {
+ return 0
+ }
+
+ d := minBackoff << uint(retry)
+ if d < minBackoff {
+ return maxBackoff
+ }
+
+ d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+ if d > maxBackoff || d < minBackoff {
+ d = maxBackoff
+ }
+
+ return d
+}
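+
+// Example (sketch): with minBackoff=8ms, maxBackoff=512ms and retry=3,
+// d starts at 8ms<<3 = 64ms, so the jittered result falls in [8ms, 72ms),
+// capped at maxBackoff.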
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
new file mode 100644
index 0000000..3810f9e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -0,0 +1,24 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+)
+
+type Logging interface {
+ Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+ log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+ _ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+var Logger Logging = &logger{
+ log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go
new file mode 100644
index 0000000..64f4627
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/once.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once. In other words, given
+//	var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error. A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+//	err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return nil
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ var err error
+ if o.done == 0 {
+ err = f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+ return err
+}
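+
+// Example (sketch; connect is an assumed func() error):
+//
+//	var once internal.Once
+//	if err := once.Do(connect); err != nil {
+//		// connect failed; a later Do call will retry it
+//	}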
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
new file mode 100644
index 0000000..08a2071
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -0,0 +1,136 @@
+package pool
+
+import (
+ "bufio"
+ "context"
+ "net"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+ usedAt int64 // atomic
+ netConn net.Conn
+
+ rd *proto.Reader
+ bw *bufio.Writer
+ wr *proto.Writer
+
+ Inited bool
+ pooled bool
+ createdAt time.Time
+}
+
+func NewConn(netConn net.Conn) *Conn {
+ cn := &Conn{
+ netConn: netConn,
+ createdAt: time.Now(),
+ }
+ cn.rd = proto.NewReader(netConn)
+ cn.bw = bufio.NewWriter(netConn)
+ cn.wr = proto.NewWriter(cn.bw)
+ cn.SetUsedAt(time.Now())
+ return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+ unix := atomic.LoadInt64(&cn.usedAt)
+ return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+ atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+ cn.netConn = netConn
+ cn.rd.Reset(netConn)
+ cn.bw.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+ return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+ if cn.netConn != nil {
+ return cn.netConn.RemoteAddr()
+ }
+ return nil
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+ return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+ if err := fn(cn.rd); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+ return nil
+ })
+}
+
+func (cn *Conn) WithWriter(
+ ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+ return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+
+ if cn.bw.Buffered() > 0 {
+ cn.bw.Reset(cn.netConn)
+ }
+
+ if err := fn(cn.wr); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+
+ if err := cn.bw.Flush(); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+
+ internal.WritesCounter.Add(ctx, 1)
+
+ return nil
+ })
+}
+
+func (cn *Conn) Close() error {
+ return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+ tm := time.Now()
+ cn.SetUsedAt(tm)
+
+ if timeout > 0 {
+ tm = tm.Add(timeout)
+ }
+
+ if ctx != nil {
+ deadline, ok := ctx.Deadline()
+ if ok {
+ if timeout == 0 {
+ return deadline
+ }
+ if deadline.Before(tm) {
+ return deadline
+ }
+ return tm
+ }
+ }
+
+ if timeout > 0 {
+ return tm
+ }
+
+ return noDeadline
+}
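+
+// Example (sketch): deadline picks the earlier of the per-call timeout and the
+// context deadline, so a 3s read timeout with a context that expires in 1s
+// yields the 1s context deadline.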
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
new file mode 100644
index 0000000..355742b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -0,0 +1,524 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+var (
+ ErrClosed = errors.New("redis: client is closed")
+ ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Hits uint32 // number of times a free connection was found in the pool
+ Misses uint32 // number of times a free connection was NOT found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // number of total connections in the pool
+ IdleConns uint32 // number of idle connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+ NewConn(context.Context) (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get(context.Context) (*Conn, error)
+ Put(context.Context, *Conn)
+ Remove(context.Context, *Conn, error)
+
+ Len() int
+ IdleLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func(context.Context) (net.Conn, error)
+ OnClose func(*Conn) error
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+type lastDialErrorWrap struct {
+ err error
+}
+
+type ConnPool struct {
+ opt *Options
+
+ dialErrorsNum uint32 // atomic
+
+ lastDialError atomic.Value
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+ poolSize int
+ idleConnsLen int
+
+ stats Stats
+
+ _closed uint32 // atomic
+ closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ opt: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ idleConns: make([]*Conn, 0, opt.PoolSize),
+ closedCh: make(chan struct{}),
+ }
+
+ p.connsMu.Lock()
+ p.checkMinIdleConns()
+ p.connsMu.Unlock()
+
+ if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+ go p.reaper(opt.IdleCheckFrequency)
+ }
+
+ return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+ if p.opt.MinIdleConns == 0 {
+ return
+ }
+ for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+ p.poolSize++
+ p.idleConnsLen++
+ go func() {
+ err := p.addIdleConn()
+ if err != nil {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+ }()
+ }
+}
+
+func (p *ConnPool) addIdleConn() error {
+ cn, err := p.dialConn(context.TODO(), true)
+ if err != nil {
+ return err
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ p.idleConns = append(p.idleConns, cn)
+ p.connsMu.Unlock()
+ return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ cn, err := p.dialConn(ctx, pooled)
+ if err != nil {
+ return nil, err
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ if pooled {
+ // If the pool is full, remove the cn on next Put.
+ if p.poolSize >= p.opt.PoolSize {
+ cn.pooled = false
+ } else {
+ p.poolSize++
+ }
+ }
+ p.connsMu.Unlock()
+
+ return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ return nil, p.getLastDialError()
+ }
+
+ netConn, err := p.opt.Dialer(ctx)
+ if err != nil {
+ p.setLastDialError(err)
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ go p.tryDial()
+ }
+ return nil, err
+ }
+
+ internal.NewConnectionsCounter.Add(ctx, 1)
+ cn := NewConn(netConn)
+ cn.pooled = pooled
+ return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+ for {
+ if p.closed() {
+ return
+ }
+
+ conn, err := p.opt.Dialer(context.Background())
+ if err != nil {
+ p.setLastDialError(err)
+ time.Sleep(time.Second)
+ continue
+ }
+
+ atomic.StoreUint32(&p.dialErrorsNum, 0)
+ _ = conn.Close()
+ return
+ }
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+ p.lastDialError.Store(&lastDialErrorWrap{err: err})
+}
+
+func (p *ConnPool) getLastDialError() error {
+ err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+ if err != nil {
+ return err.err
+ }
+ return nil
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ err := p.waitTurn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ p.connsMu.Lock()
+ cn := p.popIdle()
+ p.connsMu.Unlock()
+
+ if cn == nil {
+ break
+ }
+
+ if p.isStaleConn(cn) {
+ _ = p.CloseConn(cn)
+ continue
+ }
+
+ atomic.AddUint32(&p.stats.Hits, 1)
+ return cn, nil
+ }
+
+ atomic.AddUint32(&p.stats.Misses, 1)
+
+ newcn, err := p.newConn(ctx, true)
+ if err != nil {
+ p.freeTurn()
+ return nil, err
+ }
+
+ return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+ p.queue <- struct{}{}
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ select {
+ case p.queue <- struct{}{}:
+ return nil
+ default:
+ }
+
+ timer := timers.Get().(*time.Timer)
+ timer.Reset(p.opt.PoolTimeout)
+
+ select {
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return ctx.Err()
+ case p.queue <- struct{}{}:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return nil
+ case <-timer.C:
+ timers.Put(timer)
+ atomic.AddUint32(&p.stats.Timeouts, 1)
+ return ErrPoolTimeout
+ }
+}
+
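+// Note (sketch): waitTurn above borrows timers from the package-level
+// sync.Pool; the `if !timer.Stop() { <-timer.C }` dance drains a fired timer
+// so a stale tick cannot leak to the next borrower.
+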
+func (p *ConnPool) freeTurn() {
+ <-p.queue
+}
+
+func (p *ConnPool) popIdle() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ idx := len(p.idleConns) - 1
+ cn := p.idleConns[idx]
+ p.idleConns = p.idleConns[:idx]
+ p.idleConnsLen--
+ p.checkMinIdleConns()
+ return cn
+}
+
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
+ if cn.rd.Buffered() > 0 {
+ internal.Logger.Printf(ctx, "Conn has unread data")
+ p.Remove(ctx, cn, BadConnError{})
+ return
+ }
+
+ if !cn.pooled {
+ p.Remove(ctx, cn, nil)
+ return
+ }
+
+ p.connsMu.Lock()
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ p.connsMu.Unlock()
+ p.freeTurn()
+}
+
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.removeConnWithLock(cn)
+ p.freeTurn()
+ _ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+ p.removeConnWithLock(cn)
+ return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+ p.connsMu.Lock()
+ p.removeConn(cn)
+ p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+ for i, c := range p.conns {
+ if c == cn {
+ p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ if cn.pooled {
+ p.poolSize--
+ p.checkMinIdleConns()
+ }
+ return
+ }
+ }
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+ if p.opt.OnClose != nil {
+ _ = p.opt.OnClose(cn)
+ }
+ return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+ p.connsMu.Lock()
+ n := len(p.conns)
+ p.connsMu.Unlock()
+ return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+ p.connsMu.Lock()
+ n := p.idleConnsLen
+ p.connsMu.Unlock()
+ return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+ idleLen := p.IdleLen()
+ return &Stats{
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Misses: atomic.LoadUint32(&p.stats.Misses),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+ TotalConns: uint32(p.Len()),
+ IdleConns: uint32(idleLen),
+ StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+ }
+}
+
+func (p *ConnPool) closed() bool {
+ return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ var firstErr error
+ for _, cn := range p.conns {
+ if fn(cn) {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ return firstErr
+}
+
+func (p *ConnPool) Close() error {
+ if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+ return ErrClosed
+ }
+ close(p.closedCh)
+
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ p.conns = nil
+ p.poolSize = 0
+ p.idleConns = nil
+ p.idleConnsLen = 0
+ p.connsMu.Unlock()
+
+ return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ // It is possible that the ticker and closedCh fire together, and
+ // select pseudo-randomly picks the ticker case; we double-check
+ // here to avoid running after the pool has been closed.
+ if p.closed() {
+ return
+ }
+ _, err := p.ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
+ continue
+ }
+ case <-p.closedCh:
+ return
+ }
+ }
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+ var n int
+ for {
+ p.getTurn()
+
+ p.connsMu.Lock()
+ cn := p.reapStaleConn()
+ p.connsMu.Unlock()
+ p.freeTurn()
+
+ if cn != nil {
+ _ = p.closeConn(cn)
+ n++
+ } else {
+ break
+ }
+ }
+ atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+ return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ cn := p.idleConns[0]
+ if !p.isStaleConn(cn) {
+ return nil
+ }
+
+ p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+ p.idleConnsLen--
+ p.removeConn(cn)
+
+ return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+ if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ return false
+ }
+
+ now := time.Now()
+ if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+ return true
+ }
+ if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
new file mode 100644
index 0000000..5a3fde1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+ pool Pooler
+ cn *Conn
+ stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+ return &SingleConnPool{
+ pool: pool,
+ cn: cn,
+ }
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.stickyErr != nil {
+ return nil, p.stickyErr
+ }
+ return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.cn = nil
+ p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+ p.cn = nil
+ p.stickyErr = ErrClosed
+ return nil
+}
+
+func (p *SingleConnPool) Len() int {
+ return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+ return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
new file mode 100644
index 0000000..c3e7e7c
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -0,0 +1,202 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+)
+
+const (
+ stateDefault = 0
+ stateInited = 1
+ stateClosed = 2
+)
+
+type BadConnError struct {
+ wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+ s := "redis: Conn is in a bad state"
+ if e.wrapped != nil {
+ s += ": " + e.wrapped.Error()
+ }
+ return s
+}
+
+func (e BadConnError) Unwrap() error {
+ return e.wrapped
+}
+
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+ pool Pooler
+ shared int32 // atomic
+
+ state uint32 // atomic
+ ch chan *Conn
+
+ _badConnError atomic.Value
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+ p, ok := pool.(*StickyConnPool)
+ if !ok {
+ p = &StickyConnPool{
+ pool: pool,
+ ch: make(chan *Conn, 1),
+ }
+ }
+ atomic.AddInt32(&p.shared, 1)
+ return p
+}
+
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+ // In the worst case this races with Close, which is not a very common operation.
+ for i := 0; i < 1000; i++ {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ cn, err := p.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+ return cn, nil
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ case stateInited:
+ if err := p.badConnError(); err != nil {
+ return nil, err
+ }
+ cn, ok := <-p.ch
+ if !ok {
+ return nil, ErrClosed
+ }
+ return cn, nil
+ case stateClosed:
+ return nil, ErrClosed
+ default:
+ panic("not reached")
+ }
+ }
+ return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
+}
+
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
+ defer func() {
+ if recover() != nil {
+ p.freeConn(ctx, cn)
+ }
+ }()
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
+ if err := p.badConnError(); err != nil {
+ p.pool.Remove(ctx, cn, err)
+ } else {
+ p.pool.Put(ctx, cn)
+ }
+}
+
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ defer func() {
+ if recover() != nil {
+ p.pool.Remove(ctx, cn, ErrClosed)
+ }
+ }()
+ p._badConnError.Store(BadConnError{wrapped: reason})
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) Close() error {
+ if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
+ return nil
+ }
+
+ for i := 0; i < 1000; i++ {
+ state := atomic.LoadUint32(&p.state)
+ if state == stateClosed {
+ return ErrClosed
+ }
+ if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+ close(p.ch)
+ cn, ok := <-p.ch
+ if ok {
+ p.freeConn(context.TODO(), cn)
+ }
+ return nil
+ }
+ }
+
+ return errors.New("redis: StickyConnPool.Close: infinite loop")
+}
+
+func (p *StickyConnPool) Reset(ctx context.Context) error {
+ if p.badConnError() == nil {
+ return nil
+ }
+
+ select {
+ case cn, ok := <-p.ch:
+ if !ok {
+ return ErrClosed
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ p._badConnError.Store(BadConnError{wrapped: nil})
+ default:
+ return errors.New("redis: StickyConnPool does not have a Conn")
+ }
+
+ if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+ state := atomic.LoadUint32(&p.state)
+ return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
+ }
+
+ return nil
+}
+
+func (p *StickyConnPool) badConnError() error {
+ if v := p._badConnError.Load(); v != nil {
+ err := v.(BadConnError)
+ if err.wrapped != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *StickyConnPool) Len() int {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ return 0
+ case stateInited:
+ return 1
+ case stateClosed:
+ return 0
+ default:
+ panic("not reached")
+ }
+}
+
+func (p *StickyConnPool) IdleLen() int {
+ return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
new file mode 100644
index 0000000..0fbc51e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -0,0 +1,331 @@
+package proto
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+const (
+ ErrorReply = '-'
+ StatusReply = '+'
+ IntReply = ':'
+ StringReply = '$'
+ ArrayReply = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil")
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+ rd *bufio.Reader
+ _buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ _buf: make([]byte, 64),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if isNilReply(line) {
+ return nil, Nil
+ }
+ return line, nil
+}
+
+// readLine returns an error if:
+//   - there is a pending read error;
+//   - or the line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...)
+ b = full
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ case StringReply:
+ return r.readStringReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+ return nil, err
+ }
+ return m(r, n)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ default:
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return "", ParseErrorReply(line)
+ case StringReply:
+ return r.readStringReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return string(line[1:]), nil
+ default:
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+ }
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ if isNilReply(line) {
+ return "", Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, replyLen+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ return m(r, n)
+ default:
+ return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadArrayLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return 0, err
+ }
+ return int(n), nil
+ default:
+ return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != 2 {
+ return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+ }
+
+ cursor, err := r.ReadUint()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ n, err = r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ keys := make([]string, n)
+
+ for i := 0; i < n; i++ {
+ key, err := r.ReadString()
+ if err != nil {
+ return nil, 0, err
+ }
+ keys[i] = key
+ }
+
+ return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StringReply:
+ return r._readTmpBytesReply(line)
+ case StatusReply:
+ return line[1:], nil
+ default:
+ return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ }
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+ if isNilReply(line) {
+ return nil, Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ buf := r.buf(replyLen + 2)
+ _, err = io.ReadFull(r.rd, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+ if n <= cap(r._buf) {
+ return r._buf[:n]
+ }
+ d := n - cap(r._buf)
+ r._buf = append(r._buf, make([]byte, d)...)
+ return r._buf
+}
+
+func isNilReply(b []byte) bool {
+ return len(b) == 3 &&
+ (b[0] == StringReply || b[0] == ArrayReply) &&
+ b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+ if isNilReply(line) {
+ return 0, Nil
+ }
+ return util.ParseInt(line[1:], 10, 64)
+}
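+
+// Example (sketch): RESP wire forms handled by this reader:
+//
+//	"+OK\r\n"        -> status "OK"
+//	":42\r\n"        -> int64 42
+//	"$3\r\nfoo\r\n"  -> bulk string "foo"
+//	"$-1\r\n"        -> Nil
+//	"-ERR oops\r\n"  -> RedisError("ERR oops")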
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
new file mode 100644
index 0000000..08d18d3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -0,0 +1,173 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+// Scan parses bytes `b` into `v` using the appropriate type.
+// nolint: gocyclo
+func Scan(b []byte, v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return fmt.Errorf("redis: Scan(nil)")
+ case *string:
+ *v = util.BytesToString(b)
+ return nil
+ case *[]byte:
+ *v = b
+ return nil
+ case *int:
+ var err error
+ *v, err = util.Atoi(b)
+ return err
+ case *int8:
+ n, err := util.ParseInt(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = int8(n)
+ return nil
+ case *int16:
+ n, err := util.ParseInt(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = int16(n)
+ return nil
+ case *int32:
+ n, err := util.ParseInt(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = int32(n)
+ return nil
+ case *int64:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *uint:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = uint(n)
+ return nil
+ case *uint8:
+ n, err := util.ParseUint(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = uint8(n)
+ return nil
+ case *uint16:
+ n, err := util.ParseUint(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = uint16(n)
+ return nil
+ case *uint32:
+ n, err := util.ParseUint(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = uint32(n)
+ return nil
+ case *uint64:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *float32:
+ n, err := util.ParseFloat(b, 32)
+ if err != nil {
+ return err
+ }
+ *v = float32(n)
+ return nil
+ case *float64:
+ var err error
+ *v, err = util.ParseFloat(b, 64)
+ return err
+ case *bool:
+ *v = len(b) == 1 && b[0] == '1'
+ return nil
+ case *time.Time:
+ var err error
+ *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+ return err
+ case encoding.BinaryUnmarshaler:
+ return v.UnmarshalBinary(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+ }
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+ v := reflect.ValueOf(slice)
+ if !v.IsValid() {
+ return fmt.Errorf("redis: ScanSlice(nil)")
+ }
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+ }
+
+ next := makeSliceNextElemFunc(v)
+ for i, s := range data {
+ elem := next()
+ if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+ elemType := v.Type().Elem()
+
+ if elemType.Kind() == reflect.Ptr {
+ elemType = elemType.Elem()
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ elem := v.Index(v.Len() - 1)
+ if elem.IsNil() {
+ elem.Set(reflect.New(elemType))
+ }
+ return elem.Elem()
+ }
+
+ elem := reflect.New(elemType)
+ v.Set(reflect.Append(v, elem))
+ return elem.Elem()
+ }
+ }
+
+ zero := reflect.Zero(elemType)
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ return v.Index(v.Len() - 1)
+ }
+
+ v.Set(reflect.Append(v, zero))
+ return v.Index(v.Len() - 1)
+ }
+}
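
Because `internal/...` packages cannot be imported by applications, `proto.Scan` is reached through the public command helpers such as `StringCmd.Scan`. A minimal usage sketch, assuming a reachable server on `localhost:6379` and a `counter` key set elsewhere:

    package main

    import (
        "context"
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

        // StringCmd.Scan funnels the raw reply through proto.Scan, so any
        // integer, float, bool, time.Time or BinaryUnmarshaler target works.
        var n int64
        if err := rdb.Get(ctx, "counter").Scan(&n); err != nil {
            fmt.Println("scan failed:", err) // redis.Nil if the key is missing
            return
        }
        fmt.Println("counter =", n)
    }
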
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
new file mode 100644
index 0000000..81b09b8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -0,0 +1,153 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ // io.StringWriter
+ WriteString(s string) (n int, err error)
+}
+
+type Writer struct {
+ writer
+
+ lenBuf []byte
+ numBuf []byte
+}
+
+func NewWriter(wr writer) *Writer {
+ return &Writer{
+ writer: wr,
+
+ lenBuf: make([]byte, 64),
+ numBuf: make([]byte, 64),
+ }
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+ if err := w.WriteByte(ArrayReply); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(args)); err != nil {
+ return err
+ }
+
+ for _, arg := range args {
+ if err := w.WriteArg(arg); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+ w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+ w.lenBuf = append(w.lenBuf, '\r', '\n')
+ _, err := w.Write(w.lenBuf)
+ return err
+}
+
+func (w *Writer) WriteArg(v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return w.string("")
+ case string:
+ return w.string(v)
+ case []byte:
+ return w.bytes(v)
+ case int:
+ return w.int(int64(v))
+ case int8:
+ return w.int(int64(v))
+ case int16:
+ return w.int(int64(v))
+ case int32:
+ return w.int(int64(v))
+ case int64:
+ return w.int(v)
+ case uint:
+ return w.uint(uint64(v))
+ case uint8:
+ return w.uint(uint64(v))
+ case uint16:
+ return w.uint(uint64(v))
+ case uint32:
+ return w.uint(uint64(v))
+ case uint64:
+ return w.uint(v)
+ case float32:
+ return w.float(float64(v))
+ case float64:
+ return w.float(v)
+ case bool:
+ if v {
+ return w.int(1)
+ }
+ return w.int(0)
+ case time.Time:
+ w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+ return w.bytes(w.numBuf)
+ case encoding.BinaryMarshaler:
+ b, err := v.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ return w.bytes(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+ }
+}
+
+func (w *Writer) bytes(b []byte) error {
+ if err := w.WriteByte(StringReply); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(b)); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+
+ return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+ return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+ w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+ w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+ w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+ if err := w.WriteByte('\r'); err != nil {
+ return err
+ }
+ return w.WriteByte('\n')
+}
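
`WriteArgs` frames every command the same way: an `*<n>` array header followed by each argument encoded as a `$<len>` bulk string via `Writer.bytes`. A small buffer-only sketch of the resulting bytes (the `encode` helper is illustrative, not part of the package):

    package main

    import (
        "bytes"
        "fmt"
    )

    // encode mirrors the framing WriteArgs produces: an array header,
    // then each argument as a bulk string.
    func encode(args ...string) []byte {
        var b bytes.Buffer
        fmt.Fprintf(&b, "*%d\r\n", len(args)) // cf. ArrayReply + writeLen
        for _, a := range args {
            fmt.Fprintf(&b, "$%d\r\n%s\r\n", len(a), a) // cf. Writer.bytes
        }
        return b.Bytes()
    }

    func main() {
        fmt.Printf("%q\n", encode("SET", "key", "value"))
        // Output: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
    }
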
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
new file mode 100644
index 0000000..40676f3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -0,0 +1,45 @@
+package rand
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+ src rand.Source
+ mu sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+ s.mu.Lock()
+ n := s.src.Int63()
+ s.mu.Unlock()
+ return n
+}
+
+func (s *source) Seed(seed int64) {
+ s.mu.Lock()
+ s.src.Seed(seed)
+ s.mu.Unlock()
+}
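
The mutex matters because `math/rand.Source` is documented as unsafe for concurrent use; wrapping it lets the package-level helpers be called from many goroutines. A standalone sketch of the same pattern (`lockedSource` is a stand-in for the `source` type above):

    package main

    import (
        "fmt"
        "math/rand"
        "sync"
    )

    // lockedSource mirrors the source type above: a bare rand.Source is not
    // safe for concurrent use, so Int63 and Seed are serialized by a mutex.
    type lockedSource struct {
        mu  sync.Mutex
        src rand.Source
    }

    func (s *lockedSource) Int63() int64 {
        s.mu.Lock()
        defer s.mu.Unlock()
        return s.src.Int63()
    }

    func (s *lockedSource) Seed(seed int64) {
        s.mu.Lock()
        defer s.mu.Unlock()
        s.src.Seed(seed)
    }

    func main() {
        r := rand.New(&lockedSource{src: rand.NewSource(1)})
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                _ = r.Intn(10) // safe: every draw goes through the mutex
            }()
        }
        wg.Wait()
        fmt.Println("done")
    }
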
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
new file mode 100644
index 0000000..862ff0e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package internal
+
+func String(b []byte) string {
+ return string(b)
+}
+
+func Bytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
new file mode 100644
index 0000000..4bc7970
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -0,0 +1,20 @@
+// +build !appengine
+
+package internal
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
new file mode 100644
index 0000000..894382b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -0,0 +1,81 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
+ "go.opentelemetry.io/otel/api/global"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+ return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ })
+}
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func Unwrap(err error) error {
+ u, ok := err.(interface {
+ Unwrap() error
+ })
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
+
+//------------------------------------------------------------------------------
+
+func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
+ if span := trace.SpanFromContext(ctx); !span.IsRecording() {
+ return fn(ctx, span)
+ }
+
+ ctx, span := global.Tracer("github.com/go-redis/redis").Start(ctx, name)
+ defer span.End()
+
+ return fn(ctx, span)
+}
+
+func RecordError(ctx context.Context, err error) error {
+ if err != proto.Nil {
+ trace.SpanFromContext(ctx).RecordError(ctx, err)
+ }
+ return err
+}
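
`WithSpan` starts a new span only when the caller's span is already recording, so instrumentation is close to free when tracing is disabled. A hypothetical in-package caller sketch, using only the helpers defined above:

    // doWork stands in for any instrumented operation; it is not part of
    // the package and exists only for this sketch.
    func doWork(ctx context.Context) error { return nil }

    func example(ctx context.Context) error {
        return WithSpan(ctx, "example.op", func(ctx context.Context, span trace.Span) error {
            if err := doWork(ctx); err != nil {
                return RecordError(ctx, err) // records on the span, skipping proto.Nil
            }
            return nil
        })
    }
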
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
new file mode 100644
index 0000000..1b3060e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+ return string(b)
+}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
new file mode 100644
index 0000000..db50338
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+ return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+ return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+ return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+ return strconv.ParseFloat(BytesToString(b), bitSize)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
new file mode 100644
index 0000000..c9868aa
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
@@ -0,0 +1,22 @@
+// +build !appengine
+
+package util
+
+import (
+ "unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
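
These conversions are zero-copy: the result aliases the input's backing memory, so the `[]byte` returned by `StringToBytes` must never be written to, and the string returned by `BytesToString` changes if the source slice mutates. A standalone sketch of the caveat (`bytesToString` mirrors `BytesToString`):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // bytesToString mirrors util.BytesToString: a zero-copy view of the
    // slice's backing array, not an independent string.
    func bytesToString(b []byte) string {
        return *(*string)(unsafe.Pointer(&b))
    }

    func main() {
        b := []byte("hello")
        s := bytesToString(b)
        b[0] = 'H'
        fmt.Println(s) // "Hello": the string aliases the mutated slice
    }
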
diff --git a/vendor/github.com/go-redis/redis/v8/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go
new file mode 100644
index 0000000..2f8bc2b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/iterator.go
@@ -0,0 +1,77 @@
+package redis
+
+import (
+ "context"
+ "sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+ mu sync.Mutex // protects cmd and pos
+ cmd *ScanCmd
+ pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+ it.mu.Lock()
+ err := it.cmd.Err()
+ it.mu.Unlock()
+ return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next(ctx context.Context) bool {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ // Return immediately on errors.
+ if it.cmd.Err() != nil {
+ return false
+ }
+
+ // Advance cursor, check if we are still within range.
+ if it.pos < len(it.cmd.page) {
+ it.pos++
+ return true
+ }
+
+ for {
+ // Return if there is no more data to fetch.
+ if it.cmd.cursor == 0 {
+ return false
+ }
+
+ // Fetch next page.
+ switch it.cmd.args[0] {
+ case "scan", "qscan":
+ it.cmd.args[1] = it.cmd.cursor
+ default:
+ it.cmd.args[2] = it.cmd.cursor
+ }
+
+ err := it.cmd.process(ctx, it.cmd)
+ if err != nil {
+ return false
+ }
+
+ it.pos = 1
+
+ // Redis can occasionally return an empty page.
+ if len(it.cmd.page) > 0 {
+ return true
+ }
+ }
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+ var v string
+ it.mu.Lock()
+ if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+ v = it.cmd.page[it.pos-1]
+ }
+ it.mu.Unlock()
+ return v
+}
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
new file mode 100644
index 0000000..f2c16c5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -0,0 +1,317 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "go.opentelemetry.io/otel/api/trace"
+ "go.opentelemetry.io/otel/label"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+ // Allow returns nil if the operation is allowed, or an error otherwise.
+ // If the operation is allowed, the client must call ReportResult with
+ // the result of the operation, whether it is a success or a failure.
+ Allow() error
+ // ReportResult reports the result of the previously allowed operation.
+ // nil indicates a success, non-nil error usually indicates a failure.
+ ReportResult(result error)
+}
+
+// Options keeps the settings needed to set up a Redis connection.
+type Options struct {
+ // The network type, either tcp or unix.
+ // Default is tcp.
+ Network string
+ // host:port address.
+ Addr string
+
+ // Dialer creates a new network connection and takes priority over
+ // the Network and Addr options.
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Hook that is called when a new connection is established.
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ // Use the specified Username to authenticate the current connection
+ // with one of the connections defined in the ACL list when connecting
+ // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ Username string
+ // Optional password. Must match the password specified in the
+ // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+ // or the User Password when connecting to a Redis 6.0 instance, or greater,
+ // that is using the Redis ACL system.
+ Password string
+
+ // Database to be selected after connecting to the server.
+ DB int
+
+ // Maximum number of retries before giving up.
+ // Default is 3 retries.
+ MaxRetries int
+ // Minimum backoff between each retry.
+ // Default is 8 milliseconds; -1 disables backoff.
+ MinRetryBackoff time.Duration
+ // Maximum backoff between each retry.
+ // Default is 512 milliseconds; -1 disables backoff.
+ MaxRetryBackoff time.Duration
+
+ // Dial timeout for establishing new connections.
+ // Default is 5 seconds.
+ DialTimeout time.Duration
+ // Timeout for socket reads. If reached, commands will fail
+ // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+ // Default is 3 seconds.
+ ReadTimeout time.Duration
+ // Timeout for socket writes. If reached, commands will fail
+ // with a timeout instead of blocking.
+ // Default is ReadTimeout.
+ WriteTimeout time.Duration
+
+ // Maximum number of socket connections.
+ // Default is 10 connections per every CPU as reported by runtime.NumCPU.
+ PoolSize int
+ // Minimum number of idle connections, which is useful when establishing
+ // a new connection is slow.
+ MinIdleConns int
+ // Connection age at which the client retires (closes) the connection.
+ // Default is to not close aged connections.
+ MaxConnAge time.Duration
+ // Amount of time the client waits for a connection if all connections
+ // are busy before returning an error.
+ // Default is ReadTimeout + 1 second.
+ PoolTimeout time.Duration
+ // Amount of time after which the client closes idle connections.
+ // Should be less than the server's timeout.
+ // Default is 5 minutes. -1 disables idle timeout check.
+ IdleTimeout time.Duration
+ // Frequency of idle checks made by the idle connections reaper.
+ // Default is 1 minute. -1 disables the idle connections reaper,
+ // but idle connections are still discarded by the client
+ // if IdleTimeout is set.
+ IdleCheckFrequency time.Duration
+
+ // Enables read only queries on slave nodes.
+ readOnly bool
+
+ // TLS config to use. When set, TLS will be negotiated.
+ TLSConfig *tls.Config
+
+ // Limiter interface used to implement a circuit breaker or rate limiter.
+ Limiter Limiter
+}
+
+func (opt *Options) init() {
+ if opt.Addr == "" {
+ opt.Addr = "localhost:6379"
+ }
+ if opt.Network == "" {
+ if strings.HasPrefix(opt.Addr, "/") {
+ opt.Network = "unix"
+ } else {
+ opt.Network = "tcp"
+ }
+ }
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
+ if opt.Dialer == nil {
+ opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+ }
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 10 * runtime.NumCPU()
+ }
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+ if opt.PoolTimeout == 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ }
+ if opt.IdleTimeout == 0 {
+ opt.IdleTimeout = 5 * time.Minute
+ }
+ if opt.IdleCheckFrequency == 0 {
+ opt.IdleCheckFrequency = time.Minute
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *Options) clone() *Options {
+ clone := *opt
+ return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// The scheme is required.
+// There are two connection types: by TCP socket and by Unix socket.
+// TCP connection:
+// redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+func ParseURL(redisURL string) (*Options, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+}
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
+
+ if len(u.Query()) > 0 {
+ return nil, errors.New("redis: no options supported")
+ }
+
+ h, p, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ h = u.Host
+ }
+ if h == "" {
+ h = "localhost"
+ }
+ if p == "" {
+ p = "6379"
+ }
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{ServerName: h}
+ }
+
+ return o, nil
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+
+ o.Username, o.Password = getUserPassword(u)
+
+ dbStr := u.Query().Get("db")
+ if dbStr == "" {
+ return o, nil // if the database is not set, connect to db 0.
+ }
+
+ db, err := strconv.Atoi(dbStr)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %s", err)
+ }
+ o.DB = db
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ var conn net.Conn
+ err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
+ span.SetAttributes(
+ label.String("db.connection_string", opt.Addr),
+ )
+
+ var err error
+ conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
+ if err != nil {
+ _ = internal.RecordError(ctx, err)
+ }
+ return err
+ })
+ return conn, err
+ },
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ })
+}
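
Putting `ParseURL` and the defaults applied by `init` together, a client can be built from a single connection string; a `rediss://` scheme enables TLS with `ServerName` set to the host, per `setupTCPConn` above. A short sketch (credentials and address are placeholders):

    package main

    import (
        "context"
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        // Select database 2 over TCP with ACL credentials.
        opt, err := redis.ParseURL("redis://user:secret@localhost:6379/2")
        if err != nil {
            panic(err)
        }

        rdb := redis.NewClient(opt)
        fmt.Println(rdb.Ping(context.Background()).Err())
    }
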
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
new file mode 100644
index 0000000..c6ec340
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -0,0 +1,137 @@
+package redis
+
+import (
+ "context"
+ "sync"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique that can dramatically speed up processing by
+// packing operations into batches, sending them to Redis at once, and
+// reading the replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that a Pipeline is not a transaction, so you can get unexpected
+// results with big pipelines and small read/write timeouts.
+// The Redis client has retransmission logic for timeouts, so a pipeline
+// can be retransmitted and commands can be executed more than once.
+// To avoid this, use reasonably large read/write timeouts, depending on
+// your batch size, and/or use TxPipeline.
+type Pipeliner interface {
+ StatefulCmdable
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Close() error
+ Discard() error
+ Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ ctx context.Context
+ exec pipelineExecer
+
+ mu sync.Mutex
+ cmds []Cmder
+ closed bool
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+ c.mu.Lock()
+ c.cmds = append(c.cmds, cmd)
+ c.mu.Unlock()
+ return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+ c.mu.Lock()
+ _ = c.discard()
+ c.closed = true
+ c.mu.Unlock()
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+ c.mu.Lock()
+ err := c.discard()
+ c.mu.Unlock()
+ return err
+}
+
+func (c *Pipeline) discard() error {
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first
+// failed command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ cmds, err := c.Exec(ctx)
+ _ = c.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
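
Commands issued inside the callback are only queued; `Exec` ships them in a single round trip, after which each command's result becomes readable. A usage sketch, assuming a server on `localhost:6379`:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

        // Keep the *IntCmd so its value can be read after Exec runs.
        var incr *redis.IntCmd
        _, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
            incr = pipe.Incr(ctx, "pipelined_counter")
            pipe.Expire(ctx, "pipelined_counter", time.Hour)
            return nil
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(incr.Val())
    }
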
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
new file mode 100644
index 0000000..c56270b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -0,0 +1,629 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+const (
+ pingTimeout = time.Second
+ chanSendTimeout = time.Minute
+)
+
+var errPingTimeout = errors.New("redis: ping timeout")
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to the Redis server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+ opt *Options
+
+ newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
+ closeConn func(*pool.Conn) error
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+
+ closed bool
+ exit chan struct{}
+
+ cmd *Cmd
+
+ chOnce sync.Once
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+}
+
+func (c *PubSub) String() string {
+ channels := mapKeys(c.channels)
+ channels = append(channels, mapKeys(c.patterns)...)
+ return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c.conn(ctx, nil)
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
+ cn, err := c.newConn(ctx, channels)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.resubscribe(ctx, cn); err != nil {
+ _ = c.closeConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+ var firstErr error
+
+ if len(c.channels) > 0 {
+ firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+ }
+
+ if len(c.patterns) > 0 {
+ err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
+ }
+ return s
+}
+
+func (c *PubSub) _subscribe(
+ ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+ ctx context.Context,
+ cn *pool.Conn,
+ err error,
+ allowTimeout bool,
+) {
+ c.mu.Lock()
+ c.releaseConn(ctx, cn, err, allowTimeout)
+ c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+ if c.cn != cn {
+ return
+ }
+ if isBadConn(err, allowTimeout) {
+ c.reconnect(ctx, err)
+ }
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+ _ = c.closeTheCn(reason)
+ _, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+ }
+ err := c.closeConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+ close(c.exit)
+
+ return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe the client to the specified channels. It returns an
+// empty subscription if there are no channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "subscribe", channels...)
+ if c.channels == nil {
+ c.channels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
+ }
+ return err
+}
+
+// PSubscribe the client to the given patterns. It returns an
+// empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "psubscribe", patterns...)
+ if c.patterns == nil {
+ c.patterns = make(map[string]struct{})
+ }
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
+ }
+ return err
+}
+
+// Unsubscribe the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ err := c.subscribe(ctx, "unsubscribe", channels...)
+ return err
+}
+
+// PUnsubscribe the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ err := c.subscribe(ctx, "punsubscribe", patterns...)
+ return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+ cn, err := c.conn(ctx, channels)
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(ctx, cn, redisCmd, channels)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(ctx, args...)
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeCmd(ctx, cn, cmd)
+ c.releaseConnWithLock(ctx, cn, err, false)
+ return err
+}
+
+// Subscription received after a successful subscription to a channel.
+type Subscription struct {
+ // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as a result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+ PayloadSlice []string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as a result of a PING command issued by another client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ // Can be nil in case of "unsubscribe".
+ channel, _ := reply[1].(string)
+ return &Subscription{
+ Kind: kind,
+ Channel: channel,
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ switch payload := reply[2].(type) {
+ case string:
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: payload,
+ }, nil
+ case []interface{}:
+ ss := make([]string, len(payload))
+ for i, s := range payload {
+ ss[i] = s.(string)
+ }
+ return &Message{
+ Channel: reply[1].(string),
+ PayloadSlice: ss,
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+ }
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd(ctx)
+ }
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong, or an error.
+// See the PubSub example for details. This is a low-level API and in most
+// cases Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+ return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or an error, ignoring Subscription and
+// Pong messages. This is a low-level API and in most cases Channel should
+// be used instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
+ }
+ }
+}
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// stays full for chanSendTimeout (one minute), the message is dropped.
+// Receive* APIs can not be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping reply is not received in time.
+func (c *PubSub) Channel() <-chan *Message {
+ return c.ChannelSize(100)
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.initPing()
+ c.initMsgChan(size)
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ if cap(c.msgCh) != size {
+ err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+ panic(err)
+ }
+ return c.msgCh
+}
+
+// ChannelWithSubscriptions is like Channel, but the message type can be
+// either *Subscription or *Message. Subscription messages can be used to
+// detect reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.initPing()
+ c.initAllChan(size)
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ if cap(c.allCh) != size {
+ err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+ panic(err)
+ }
+ return c.allCh
+}
+
+func (c *PubSub) getContext() context.Context {
+ if c.cmd != nil {
+ return c.cmd.ctx
+ }
+ return context.Background()
+}
+
+func (c *PubSub) initPing() {
+ ctx := context.TODO()
+ c.ping = make(chan struct{}, 1)
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ healthy := true
+ for {
+ timer.Reset(pingTimeout)
+ select {
+ case <-c.ping:
+ healthy = true
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ pingErr := c.Ping(ctx)
+ if healthy {
+ healthy = false
+ } else {
+ if pingErr == nil {
+ pingErr = errPingTimeout
+ }
+ c.mu.Lock()
+ c.reconnect(ctx, pingErr)
+ healthy = true
+ c.mu.Unlock()
+ }
+ case <-c.exit:
+ return
+ }
+ }
+ }()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *PubSub) initMsgChan(size int) {
+ ctx := context.TODO()
+ c.msgCh = make(chan *Message, size)
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.msgCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ timer.Reset(chanSendTimeout)
+ select {
+ case c.msgCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ c.getContext(),
+ "redis: %s channel is full for %s (message is dropped)",
+ c,
+ chanSendTimeout,
+ )
+ }
+ default:
+ internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *PubSub) initAllChan(size int) {
+ ctx := context.TODO()
+ c.allCh = make(chan interface{}, size)
+ go func() {
+ timer := time.NewTimer(pingTimeout)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.allCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ c.sendMessage(msg, timer)
+ case *Pong:
+ // Ignore.
+ case *Message:
+ c.sendMessage(msg, timer)
+ default:
+ internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
+ timer.Reset(pingTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ c.getContext(),
+ "redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+ }
+}
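
A typical consumer subscribes, waits for the confirmation with `Receive`, then switches to the managed `Channel`; per the notes above, the `Receive*` APIs must not be mixed with `Channel` afterwards. A usage sketch, assuming a server on `localhost:6379`:

    package main

    import (
        "context"
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

        pubsub := rdb.Subscribe(ctx, "mychannel")
        defer pubsub.Close()

        // Wait for the subscription confirmation before relying on delivery.
        if _, err := pubsub.Receive(ctx); err != nil {
            panic(err)
        }

        // The channel is closed when the PubSub is closed.
        for msg := range pubsub.Channel() {
            fmt.Println(msg.Channel, msg.Payload)
        }
    }
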
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
new file mode 100644
index 0000000..efad7f1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -0,0 +1,783 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "go.opentelemetry.io/otel/api/trace"
+ "go.opentelemetry.io/otel/label"
+)
+
+// Nil reply returned by Redis when a key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+ AfterProcess(ctx context.Context, cmd Cmder) error
+
+ BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+ AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
+
+type hooks struct {
+ hooks []Hook
+}
+
+func (hs *hooks) lock() {
+ hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) clone() hooks {
+ clone := hs
+ clone.lock()
+ return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+ hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+ ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+ if len(hs.hooks) == 0 {
+ err := hs.withContext(ctx, func() error {
+ return fn(ctx, cmd)
+ })
+ cmd.SetErr(err)
+ return err
+ }
+
+ var hookIndex int
+ var retErr error
+
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+ if retErr != nil {
+ cmd.SetErr(retErr)
+ }
+ }
+
+ if retErr == nil {
+ retErr = hs.withContext(ctx, func() error {
+ return fn(ctx, cmd)
+ })
+ cmd.SetErr(retErr)
+ }
+
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+ retErr = err
+ cmd.SetErr(retErr)
+ }
+ }
+
+ return retErr
+}
+
+func (hs hooks) processPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ if len(hs.hooks) == 0 {
+ err := hs.withContext(ctx, func() error {
+ return fn(ctx, cmds)
+ })
+ return err
+ }
+
+ var hookIndex int
+ var retErr error
+
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+ if retErr != nil {
+ setCmdsErr(cmds, retErr)
+ }
+ }
+
+ if retErr == nil {
+ retErr = hs.withContext(ctx, func() error {
+ return fn(ctx, cmds)
+ })
+ }
+
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+ retErr = err
+ setCmdsErr(cmds, retErr)
+ }
+ }
+
+ return retErr
+}
+
+func (hs hooks) processTxPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return hs.processPipeline(ctx, cmds, fn)
+}
+
+func (hs hooks) withContext(ctx context.Context, fn func() error) error {
+ done := ctx.Done()
+ if done == nil {
+ return fn()
+ }
+
+ errc := make(chan error, 1)
+ go func() { errc <- fn() }()
+
+ select {
+ case <-done:
+ return ctx.Err()
+ case err := <-errc:
+ return err
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+ return &baseClient{
+ opt: opt,
+ connPool: connPool,
+ }
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
+ return c.initConn(ctx, cn)
+ })
+ if err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := internal.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ if c.opt.Password == "" &&
+ c.opt.DB == 0 &&
+ !c.opt.readOnly &&
+ c.opt.OnConnect == nil {
+ return nil
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(ctx, c.opt, connPool)
+
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if c.opt.Password != "" {
+ if c.opt.Username != "" {
+ pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
+ } else {
+ pipe.Auth(ctx, c.opt.Password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ if span.IsRecording() {
+ if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
+ span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
+ }
+ }
+
+ defer func() {
+ c.releaseConn(ctx, cn, err)
+ }()
+
+ err = fn(ctx, cn)
+ return err
+ })
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ var retry bool
+ err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ retryTimeout := true
+ err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
+ return err
+ }
+
+ err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+ if err != nil {
+ retryTimeout = cmd.readTimeout() == nil
+ return err
+ }
+
+ return nil
+ })
+ if err == nil {
+ return nil
+ }
+ retry = shouldRetry(err, retryTimeout)
+ return err
+ })
+ if err == nil || !retry {
+ return err
+ }
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ err := c._generalProcessPipeline(ctx, cmds, p)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ var canRetry bool
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ })
+ return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := txPipelineReadQueued(rd, statusCmd, cmds)
+ if err != nil {
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ return false, err
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdCopy := make([]Cmder, len(cmds)+2)
+ cmdCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdCopy[1:], cmds)
+ cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdCopy
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+ *baseClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: newBaseClient(opt, newConnPool(opt)),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+
+ return &c
+}
+
+func (c *Client) clone() *Client {
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := c.clone()
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ return clone
+}
+
+func (c *Client) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := c.clone()
+ clone.ctx = ctx
+ return clone
+}
+
+func (c *Client) Conn(ctx context.Context) *Conn {
+ return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks // TODO: inherit hooks
+}
+
+// Conn is like Client, but its pool contains a single connection.
+type Conn struct {
+ *conn
+ ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ conn: &conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ },
+ ctx: ctx,
+ }
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
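
For reference, the Pipeline/TxPipeline split above is the difference between plain command batching and a MULTI/EXEC-wrapped batch. A minimal sketch of the TxPipelined path, assuming a reachable Redis at localhost:6379 (the address and key name are illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// Both queued commands are sent in a single MULTI/EXEC block.
	var incr *redis.IntCmd
	_, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		incr = pipe.Incr(ctx, "tx_counter")
		pipe.Expire(ctx, "tx_counter", time.Hour)
		return nil
	})
	if err != nil {
		panic(err)
	}
	// Command results are populated only after Pipelined returns,
	// which is why the *IntCmd is captured and read afterwards.
	fmt.Println("counter =", incr.Val())
}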
diff --git a/vendor/github.com/go-redis/redis/v8/renovate.json b/vendor/github.com/go-redis/redis/v8/renovate.json
new file mode 100644
index 0000000..f45d8f1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/renovate.json
@@ -0,0 +1,5 @@
+{
+ "extends": [
+ "config:base"
+ ]
+}
diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go
new file mode 100644
index 0000000..24cfd49
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/result.go
@@ -0,0 +1,180 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+ var cmd StringStringMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+ var cmd StringIntMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+ var cmd TimeCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+ var cmd ZWithKeyCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+ var cmd GeoPosCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXMessageSliceCmdResult returns an XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+ var cmd XMessageSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXStreamSliceCmdResult returns an XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+ var cmd XStreamSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
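
The constructors in result.go above exist so unit tests can fabricate command results without a live server. A hypothetical sketch, where fetch and the values are invented for illustration:

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

// fetch is an invented function under test; it depends only on a
// *StringCmd value, not on a live connection.
func fetch(cmd *redis.StringCmd) (string, error) {
	if err := cmd.Err(); err != nil {
		return "", err
	}
	return cmd.Val(), nil
}

func main() {
	// Fabricate one successful result and one "key missing" result.
	ok := redis.NewStringResult("value-from-redis", nil)
	missing := redis.NewStringResult("", redis.Nil)

	v, err := fetch(ok)
	fmt.Println(v, err) // value-from-redis <nil>

	_, err = fetch(missing)
	fmt.Println(err == redis.Nil) // true
}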
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
new file mode 100644
index 0000000..34d05f3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -0,0 +1,731 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/dgryski/go-rendezvous"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // NewClient creates a shard client with provided name and options.
+ NewClient func(name string, opt *Options) *Client
+
+ // Frequency of PING commands sent to check shards' availability.
+ // A shard is considered down after 3 consecutive failed checks.
+ HeartbeatFrequency time.Duration
+
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
+ //
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
+}
+
+func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(name string, opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+ DB: opt.DB,
+
+ MaxRetries: -1,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+}
+
+func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(name, clopt),
+ }
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set the shard state and returns true if the state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ hash ConsistentHash
+ shards map[string]*ringShard // read only
+ list []*ringShard // read only
+ numShard int
+ closed bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+ shards := make(map[string]*ringShard, len(opt.Addrs))
+ list := make([]*ringShard, 0, len(opt.Addrs))
+
+ for name, addr := range opt.Addrs {
+ shard := newRingShard(opt, name, addr)
+ shards[name] = shard
+
+ list = append(list, shard)
+ }
+
+ c := &ringShards{
+ opt: opt,
+
+ shards: shards,
+ list: list,
+ }
+ c.rebalance()
+
+ return c
+}
+
+func (c *ringShards) List() []*ringShard {
+ var list []*ringShard
+
+ c.mu.RLock()
+ if !c.closed {
+ list = c.list
+ }
+ c.mu.RUnlock()
+
+ return list
+}
+
+func (c *ringShards) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
+ c.mu.RLock()
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
+ c.mu.RUnlock()
+
+ return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ return nil, pool.ErrClosed
+ }
+
+ if c.numShard == 0 {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ hash := c.hash.Get(key)
+ if hash == "" {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ shard := c.shards[hash]
+ c.mu.RUnlock()
+
+ return shard, nil
+}
+
+func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ shard := c.shards[shardName]
+ c.mu.RUnlock()
+ return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ ctx := context.Background()
+ for range ticker.C {
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.rebalance()
+ }
+ }
+}
+
+// rebalance rebuilds the consistent hash over the live shards, removing dead ones from the ring.
+func (c *ringShards) rebalance() {
+ c.mu.RLock()
+ shards := c.shards
+ c.mu.RUnlock()
+
+ liveShards := make([]string, 0, len(shards))
+
+ for name, shard := range shards {
+ if shard.IsUp() {
+ liveShards = append(liveShards, name)
+ }
+ }
+
+ hash := c.opt.NewConsistentHash(liveShards)
+
+ c.mu.Lock()
+ c.hash = hash
+ c.numShard = len(liveShards)
+ c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+ c.mu.RLock()
+ l := c.numShard
+ c.mu.RUnlock()
+ return l
+}
+
+func (c *ringShards) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, shard := range c.shards {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ c.hash = nil
+ c.shards = nil
+ c.list = nil
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+ opt *RingOptions
+ shards *ringShards
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses the shards that are available to it and performs no coordination
+// with other clients when shard state changes.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ *ring
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ opt.init()
+
+ ring := Ring{
+ ring: &ring{
+ opt: opt,
+ shards: newRingShards(opt),
+ },
+ ctx: context.Background(),
+ }
+
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+ return &ring
+}
+
+func (c *Ring) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ shards := c.shards.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ shards := c.shards.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(ctx, shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ shards := c.shards.List()
+ var firstErr error
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+ if err != nil {
+ return nil
+ }
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ pos := cmdFirstKeyPos(cmd, cmdInfo)
+ if pos == 0 {
+ return c.shards.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(ctx, cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, false)
+ })
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, true)
+ })
+}
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ cmdsMap := make(map[string][]Cmder)
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ if hash != "" {
+ hash = c.shards.Hash(hash)
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ _ = c.processShardPipeline(ctx, hash, cmds, tx)
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+ ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+ // TODO: retry?
+ shard, err := c.shards.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ if tx {
+ return shard.Client.processTxPipeline(ctx, cmds)
+ }
+ return shard.Client.processPipeline(ctx, cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.shards.GetByKey(hashtag.Key(key))
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(ctx, fn, keys...)
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ return c.shards.Close()
+}
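
A minimal sketch of wiring up a Ring, assuming two local shards; the shard names and addresses are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	ring := redis.NewRing(&redis.RingOptions{
		// shard name => host:port; keys are spread across these
		// addresses by the consistent hash.
		Addrs: map[string]string{
			"shard1": "localhost:6379",
			"shard2": "localhost:6380",
		},
	})
	defer ring.Close()

	if err := ring.Set(ctx, "user:1", "alice", 0).Err(); err != nil {
		panic(err)
	}
	val, err := ring.Get(ctx, "user:1").Result()
	fmt.Println(val, err)
}

Each key maps to exactly one live shard, so a dead shard loses only the keys hashed to it, matching the availability trade-off described in the Ring doc comment.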
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
new file mode 100644
index 0000000..07ed482
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -0,0 +1,65 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ scripter = (*Client)(nil)
+ _ scripter = (*Ring)(nil)
+ _ scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
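
Script.Run above implements the common EVALSHA-first pattern: try the cached hash, and fall back to EVAL (which also loads the script server-side) on a NOSCRIPT error. A short sketch with an invented Lua script:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy is an invented script; KEYS and ARGV follow normal EVAL rules.
var incrBy = redis.NewScript(`
return redis.call("INCRBY", KEYS[1], ARGV[1])
`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// The first call falls back to EVAL on NOSCRIPT and loads the
	// script; subsequent calls succeed directly via EVALSHA.
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 5).Int()
	fmt.Println(n, err)
}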
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
new file mode 100644
index 0000000..7db9843
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -0,0 +1,731 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+ // Sentinel password from "requirepass <password>" (if enabled) in the Sentinel configuration.
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to slave read-only nodes.
+ SlaveOnly bool
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterSlaveDialer(failover)
+ opt.init()
+
+ connPool := newConnPool(opt)
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+
+ c := Client{
+ baseClient: newBaseClient(opt, connPool),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+ c.onClose = failover.Close
+
+ return &c
+}
+
+func masterSlaveDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.SlaveOnly {
+ addr, err = failover.RandomSlaveAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+ return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+ hooks
+ ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(opt),
+ },
+ ctx: context.Background(),
+ }
+ return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master was not reachable, and without
+// asking other Sentinels for agreement.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with a matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set is used to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove is used to remove the specified master: the master will no longer
+// be monitored, and will be completely removed from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+ addresses, err := c.slaveAddrs(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr := c.getMasterAddr(ctx, sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr := c.getMasterAddr(ctx, c.sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, c.sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addrs := parseSlaveAddrs(slaves)
+ return addrs, nil
+ }
+
+ return []string{}, errors.New("redis: all sentinels are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ return ""
+ }
+ return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
+ addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ c.opt.MasterName, err)
+ return []string{}
+ }
+ return parseSlaveAddrs(addrs)
+}
+
+func parseSlaveAddrs(addrs []interface{}) []string {
+ nodes := make([]string, 0, len(addrs))
+
+ for _, node := range addrs {
+ ip := ""
+ port := ""
+ flags := []string{}
+ lastkey := ""
+ isDown := false
+
+ for _, key := range node.([]interface{}) {
+ switch lastkey {
+ case "ip":
+ ip = key.(string)
+ case "port":
+ port = key.(string)
+ case "flags":
+ flags = strings.Split(key.(string), ",")
+ }
+ lastkey = key.(string)
+ }
+
+ for _, flag := range flags {
+ switch flag {
+ case "s_down", "o_down", "disconnected":
+ isDown = true
+ }
+ }
+
+ if !isDown {
+ nodes = append(nodes, net.JoinHostPort(ip, port))
+ }
+ }
+
+ return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a slave node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ slaveAddrs, err := failover.slaveAddrs(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, slaveAddr := range slaveAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: slaveAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+
+ return c
+}
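
A minimal failover-client sketch, assuming a Sentinel deployment monitoring a master named "mymaster"; all addresses are illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName: "mymaster",
		SentinelAddrs: []string{
			"localhost:26379",
			"localhost:26380",
			"localhost:26381",
		},
	})
	defer rdb.Close()

	// Commands go to the current master; on reconnect the dialer
	// re-resolves the master address through Sentinel.
	fmt.Println(rdb.Ping(ctx).Result())
}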
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
new file mode 100644
index 0000000..ad825c6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -0,0 +1,151 @@
+package redis
+
+import (
+ "context"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction fails.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+// If you don't need WATCH it is better to use Pipeline.
+type Tx struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks
+ ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool),
+ },
+ hooks: c.hooks.clone(),
+ ctx: ctx,
+ }
+ tx.init()
+ return &tx
+}
+
+func (c *Tx) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.init()
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and marks the given keys to be watched
+// for conditional execution, if any keys are provided.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx(ctx)
+ if len(keys) > 0 {
+ if err := tx.Watch(ctx, keys...).Err(); err != nil {
+ _ = tx.Close(ctx)
+ return err
+ }
+ }
+
+ err := fn(tx)
+ _ = tx.Close(ctx)
+ return err
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+ _ = c.Unwatch(ctx).Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "watch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unwatch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command, or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
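
The Watch/TxPipelined pair above is the building block for optimistic locking. A sketch of the canonical check-and-set increment, retried when a concurrent writer invalidates the watched key (the key name and retry limit are illustrative):

package main

import (
	"context"
	"fmt"
	"strconv"

	"github.com/go-redis/redis/v8"
)

// increment adds 1 to key using WATCH/MULTI/EXEC, retrying when a
// concurrent writer aborts the transaction.
func increment(ctx context.Context, rdb *redis.Client, key string) error {
	txf := func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		// The queued SET runs only if key is unchanged at EXEC time.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
			return nil
		})
		return err
	}

	for i := 0; i < 10; i++ {
		err := rdb.Watch(ctx, txf, key)
		if err != redis.TxFailedErr {
			return err // nil on success, or a non-retryable error
		}
		// Lost the race to another writer; try again.
	}
	return redis.TxFailedErr
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()
	fmt.Println(increment(context.Background(), rdb, "counter"))
}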
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
new file mode 100644
index 0000000..5f0e1e3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -0,0 +1,206 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions contains the information required by UniversalClient to
+// establish connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+ MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelPassword: o.SentinelPassword,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which, based on the provided options,
+// can connect to clusters, sentinel-backed failover instances, or simple
+// single-instance servers. This can be useful for testing cluster-specific
+// applications locally.
+type UniversalClient interface {
+ Cmdable
+ Context() context.Context
+ AddHook(Hook)
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
+ Close() error
+ PoolStats() *PoolStats
+}
+
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of client returned depends
+// on the following three conditions:
+//
+// 1. if a MasterName is passed, a sentinel-backed FailoverClient is returned;
+// 2. if the number of Addrs is two or more, a ClusterClient is returned;
+// 3. otherwise, a single-node redis Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.Failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.Cluster())
+ }
+ return NewClient(opts.Simple())
+}
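
Finally, a sketch of how NewUniversalClient dispatches; the same UniversalOptions literal yields a failover, cluster, or single-node client depending only on which fields are set (addresses are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// One address and no MasterName => a single-node *redis.Client.
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})
	defer rdb.Close()

	// Setting MasterName would return a sentinel-backed failover client:
	//	redis.NewUniversalClient(&redis.UniversalOptions{
	//		MasterName: "mymaster",
	//		Addrs:      []string{"localhost:26379"},
	//	})
	// and two or more Addrs (without MasterName) a *redis.ClusterClient.

	fmt.Println(rdb.Ping(ctx).Result())
}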