VOL-4925 - Build and release components.
Misc/
------
o Bulk copyright notice update to 2023.

go.mod
go.sum
------
o Bump component version strings to the latest release.
o Cosmetic edit to force a build.
Change-Id: Icc8869463d1f1a4451938466c39fcc3d11ebad73
diff --git a/.golangci.yml b/.golangci.yml
index 08c7e97..e817ff3 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,4 +1,4 @@
-# Copyright 2020-present Open Networking Foundation
+# Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/Makefile b/Makefile
index 1b07af8..aa9d833 100644
--- a/Makefile
+++ b/Makefile
@@ -120,6 +120,7 @@
endif
## -----------------------------------------------------------------------
+## Intent:
## -----------------------------------------------------------------------
docker-push: ## Push the docker images to an external repository
docker push ${RWCORE_IMAGENAME}:${DOCKER_TAG}
diff --git a/VERSION b/VERSION
index 23887f6..c848fb9 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.1.7
+3.1.8
diff --git a/db/model/common.go b/db/model/common.go
index 008d18c..a85c0b5 100644
--- a/db/model/common.go
+++ b/db/model/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/db/model/proxy.go b/db/model/proxy.go
index d9b5f66..4c775d7 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index b56e328..16c3632 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/go.mod b/go.mod
index fdf47ba..501d2cc 100644
--- a/go.mod
+++ b/go.mod
@@ -17,8 +17,8 @@
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
github.com/google/uuid v1.3.0
- github.com/opencord/voltha-lib-go/v7 v7.1.8
- github.com/opencord/voltha-protos/v5 v5.2.5
+ github.com/opencord/voltha-lib-go/v7 v7.3.2
+ github.com/opencord/voltha-protos/v5 v5.3.8
github.com/opentracing/opentracing-go v1.2.0
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
github.com/stretchr/testify v1.7.0
diff --git a/go.sum b/go.sum
index 1ba4b60..f0dee1d 100644
--- a/go.sum
+++ b/go.sum
@@ -53,6 +53,8 @@
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
@@ -79,6 +81,8 @@
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-redis/redis/v8 v8.3.4 h1:ZF7juZS2wzxloqMKslTutWJ05IQrnchCSk1HD4d4Vbs=
+github.com/go-redis/redis/v8 v8.3.4/go.mod h1:jszGxBCez8QA1HWSmQxJO9Y82kNibbUmeYhKWrBejTU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -116,6 +120,7 @@
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -193,17 +198,18 @@
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI=
github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/opencord/voltha-lib-go/v7 v7.1.8 h1:5k+1Ul+T+gmvM7GONbK1/+YrX4tizAc3REgHoFvug0I=
-github.com/opencord/voltha-lib-go/v7 v7.1.8/go.mod h1:lnwlFfhDVMBg2siCv1CajB1fvfAU9Cs8VbB64LQ8zVg=
-github.com/opencord/voltha-protos/v5 v5.2.2/go.mod h1:ZGcyW79kQKIo7AySo1LRu613E6uiozixrCF0yNB/4x8=
-github.com/opencord/voltha-protos/v5 v5.2.5 h1:FEvUwDMSOvoii1jXCD54HxfEZqb0Gl5dN8DFRd1mGXk=
-github.com/opencord/voltha-protos/v5 v5.2.5/go.mod h1:ZGcyW79kQKIo7AySo1LRu613E6uiozixrCF0yNB/4x8=
+github.com/opencord/voltha-lib-go/v7 v7.3.2 h1:mvQE+HTf3sLXIMulkDQJbbR67lIaV/Y6IIj1co0vrhU=
+github.com/opencord/voltha-lib-go/v7 v7.3.2/go.mod h1:3XnWQBHALGZTm5n3j401zKGG9aL2UqSU3/owGwNmcxM=
+github.com/opencord/voltha-protos/v5 v5.3.8 h1:tL8I1wtOfuMnKMQvgN1Ul+8YL/LTBm0PpNuxU1usGDw=
+github.com/opencord/voltha-protos/v5 v5.3.8/go.mod h1:ZGcyW79kQKIo7AySo1LRu613E6uiozixrCF0yNB/4x8=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -276,6 +282,8 @@
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v3.3.25+incompatible h1:V1RzkZJj9LqsJRy+TUBgpWSbZXITLB819lstuTFoZOY=
go.etcd.io/etcd v3.3.25+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
+go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA=
+go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -328,6 +336,7 @@
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -360,6 +369,7 @@
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/makefiles/consts.mk b/makefiles/consts.mk
index c34d119..043ec65 100644
--- a/makefiles/consts.mk
+++ b/makefiles/consts.mk
@@ -1,6 +1,6 @@
# -*- makefile -*-
# -----------------------------------------------------------------------
-# Copyright 2022 Open Networking Foundation
+# Copyright 2022-2023 Open Networking Foundation (ONF) and the ONF Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/rw_core/common.go b/rw_core/common.go
index 13a51c7..d712ca0 100644
--- a/rw_core/common.go
+++ b/rw_core/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/config/config.go b/rw_core/config/config.go
index 69e0c0d..c928a84 100644
--- a/rw_core/config/config.go
+++ b/rw_core/config/config.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/adapter/agent.go b/rw_core/core/adapter/agent.go
index 0030d48..973f767 100644
--- a/rw_core/core/adapter/agent.go
+++ b/rw_core/core/adapter/agent.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/adapter/endpoint_manager.go b/rw_core/core/adapter/endpoint_manager.go
index dac137c..bcea319 100644
--- a/rw_core/core/adapter/endpoint_manager.go
+++ b/rw_core/core/adapter/endpoint_manager.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/adapter/endpoint_manager_test.go b/rw_core/core/adapter/endpoint_manager_test.go
index 78635d1..63ed57e 100644
--- a/rw_core/core/adapter/endpoint_manager_test.go
+++ b/rw_core/core/adapter/endpoint_manager_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/adapter/manager.go b/rw_core/core/adapter/manager.go
index 9a1ff1c..1ab4341 100644
--- a/rw_core/core/adapter/manager.go
+++ b/rw_core/core/adapter/manager.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/api/common.go b/rw_core/core/api/common.go
index 13294eb..8185299 100644
--- a/rw_core/core/api/common.go
+++ b/rw_core/core/api/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/api/grpc_nbi_handler.go b/rw_core/core/api/grpc_nbi_handler.go
index 8adcdc5..d18911b 100755
--- a/rw_core/core/api/grpc_nbi_handler.go
+++ b/rw_core/core/api/grpc_nbi_handler.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/common.go b/rw_core/core/common.go
index b101d8a..d2d6f6e 100644
--- a/rw_core/core/common.go
+++ b/rw_core/core/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
index 58c6968..0df3b4e 100644
--- a/rw_core/core/core.go
+++ b/rw_core/core/core.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent.go b/rw_core/core/device/agent.go
index 2c69b28..499124c 100755
--- a/rw_core/core/device/agent.go
+++ b/rw_core/core/device/agent.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_device_update.go b/rw_core/core/device/agent_device_update.go
index 599c2cf..7cea7a1 100644
--- a/rw_core/core/device/agent_device_update.go
+++ b/rw_core/core/device/agent_device_update.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_flow.go b/rw_core/core/device/agent_flow.go
index 982e644..85c92f1 100644
--- a/rw_core/core/device/agent_flow.go
+++ b/rw_core/core/device/agent_flow.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_group.go b/rw_core/core/device/agent_group.go
index ad95e9c..245c758 100644
--- a/rw_core/core/device/agent_group.go
+++ b/rw_core/core/device/agent_group.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_image.go b/rw_core/core/device/agent_image.go
index 7c86e08..b27f47e 100644
--- a/rw_core/core/device/agent_image.go
+++ b/rw_core/core/device/agent_image.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_pm_config.go b/rw_core/core/device/agent_pm_config.go
index 0e5bf7c..47c4a0e 100644
--- a/rw_core/core/device/agent_pm_config.go
+++ b/rw_core/core/device/agent_pm_config.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_port.go b/rw_core/core/device/agent_port.go
index bdb678e..937c46c 100644
--- a/rw_core/core/device/agent_port.go
+++ b/rw_core/core/device/agent_port.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_test.go b/rw_core/core/device/agent_test.go
index 354f306..076438c 100755
--- a/rw_core/core/device/agent_test.go
+++ b/rw_core/core/device/agent_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/agent_transient_state.go b/rw_core/core/device/agent_transient_state.go
index 51d08ff..4171bc4 100644
--- a/rw_core/core/device/agent_transient_state.go
+++ b/rw_core/core/device/agent_transient_state.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/common.go b/rw_core/core/device/common.go
index bad2067..f0875de 100644
--- a/rw_core/core/device/common.go
+++ b/rw_core/core/device/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/event/common.go b/rw_core/core/device/event/common.go
index 7a0ca19..ee45842 100644
--- a/rw_core/core/device/event/common.go
+++ b/rw_core/core/device/event/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/event/event.go b/rw_core/core/device/event/event.go
index 5d8ccda..d783970 100644
--- a/rw_core/core/device/event/event.go
+++ b/rw_core/core/device/event/event.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/extension_manager.go b/rw_core/core/device/extension_manager.go
index a895c34..55bfee9 100644
--- a/rw_core/core/device/extension_manager.go
+++ b/rw_core/core/device/extension_manager.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/flow/cache.go b/rw_core/core/device/flow/cache.go
index 81efe0d..c600c21 100644
--- a/rw_core/core/device/flow/cache.go
+++ b/rw_core/core/device/flow/cache.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/flow/cache_test.go b/rw_core/core/device/flow/cache_test.go
index d0d8049..444d2b5 100644
--- a/rw_core/core/device/flow/cache_test.go
+++ b/rw_core/core/device/flow/cache_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/flow/common.go b/rw_core/core/device/flow/common.go
index 5765c5d..ab78b13 100644
--- a/rw_core/core/device/flow/common.go
+++ b/rw_core/core/device/flow/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/group/cache.go b/rw_core/core/device/group/cache.go
index b182914..49686ca 100644
--- a/rw_core/core/device/group/cache.go
+++ b/rw_core/core/device/group/cache.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent.go b/rw_core/core/device/logical_agent.go
index b0eaf38..7a69722 100644
--- a/rw_core/core/device/logical_agent.go
+++ b/rw_core/core/device/logical_agent.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_flow.go b/rw_core/core/device/logical_agent_flow.go
index eda4fa6..c2dba5a 100644
--- a/rw_core/core/device/logical_agent_flow.go
+++ b/rw_core/core/device/logical_agent_flow.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_group.go b/rw_core/core/device/logical_agent_group.go
index cbf296b..261b0a5 100644
--- a/rw_core/core/device/logical_agent_group.go
+++ b/rw_core/core/device/logical_agent_group.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_meter.go b/rw_core/core/device/logical_agent_meter.go
index d6e949a..849d3b7 100644
--- a/rw_core/core/device/logical_agent_meter.go
+++ b/rw_core/core/device/logical_agent_meter.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_meter_helpers.go b/rw_core/core/device/logical_agent_meter_helpers.go
index 16f7302..aade49f 100644
--- a/rw_core/core/device/logical_agent_meter_helpers.go
+++ b/rw_core/core/device/logical_agent_meter_helpers.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_port.go b/rw_core/core/device/logical_agent_port.go
index 35f18b9..719b6b8 100644
--- a/rw_core/core/device/logical_agent_port.go
+++ b/rw_core/core/device/logical_agent_port.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_route.go b/rw_core/core/device/logical_agent_route.go
index 333efa8..88cf105 100644
--- a/rw_core/core/device/logical_agent_route.go
+++ b/rw_core/core/device/logical_agent_route.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_agent_test.go b/rw_core/core/device/logical_agent_test.go
index 19ed77c..8c9c8ee 100644
--- a/rw_core/core/device/logical_agent_test.go
+++ b/rw_core/core/device/logical_agent_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_manager.go b/rw_core/core/device/logical_manager.go
index 3f08195..9f03746 100644
--- a/rw_core/core/device/logical_manager.go
+++ b/rw_core/core/device/logical_manager.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_port/common.go b/rw_core/core/device/logical_port/common.go
index c8b3538..6aebd68 100644
--- a/rw_core/core/device/logical_port/common.go
+++ b/rw_core/core/device/logical_port/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/logical_port/loader.go b/rw_core/core/device/logical_port/loader.go
index c6f272d..61935aa 100644
--- a/rw_core/core/device/logical_port/loader.go
+++ b/rw_core/core/device/logical_port/loader.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/manager.go b/rw_core/core/device/manager.go
index b48e603..879eaef 100755
--- a/rw_core/core/device/manager.go
+++ b/rw_core/core/device/manager.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/manager_sbi.go b/rw_core/core/device/manager_sbi.go
index c0e1b6b..5f1c01c 100644
--- a/rw_core/core/device/manager_sbi.go
+++ b/rw_core/core/device/manager_sbi.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/manager_state_callback.go b/rw_core/core/device/manager_state_callback.go
index b833250..4dc5b7e 100644
--- a/rw_core/core/device/manager_state_callback.go
+++ b/rw_core/core/device/manager_state_callback.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/manager_test.go_todo b/rw_core/core/device/manager_test.go_todo
index e0156bb..ef81a13 100644
--- a/rw_core/core/device/manager_test.go_todo
+++ b/rw_core/core/device/manager_test.go_todo
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/meter/common.go b/rw_core/core/device/meter/common.go
index 7ecb52c..c9ec48f 100644
--- a/rw_core/core/device/meter/common.go
+++ b/rw_core/core/device/meter/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/meter/loader.go b/rw_core/core/device/meter/loader.go
index 52cdb9a..941a23c 100644
--- a/rw_core/core/device/meter/loader.go
+++ b/rw_core/core/device/meter/loader.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
diff --git a/rw_core/core/device/mock_kafka.go b/rw_core/core/device/mock_kafka.go
index 68c8aa5..752265c 100644
--- a/rw_core/core/device/mock_kafka.go
+++ b/rw_core/core/device/mock_kafka.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/port/common.go b/rw_core/core/device/port/common.go
index c8b3538..6aebd68 100644
--- a/rw_core/core/device/port/common.go
+++ b/rw_core/core/device/port/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/port/loader.go b/rw_core/core/device/port/loader.go
index 823d1dd..2f322ef 100644
--- a/rw_core/core/device/port/loader.go
+++ b/rw_core/core/device/port/loader.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/state/common.go b/rw_core/core/device/state/common.go
index 43d6419..6081f75 100644
--- a/rw_core/core/device/state/common.go
+++ b/rw_core/core/device/state/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/state/transitions.go b/rw_core/core/device/state/transitions.go
index 6fc837e..e1d42fd 100644
--- a/rw_core/core/device/state/transitions.go
+++ b/rw_core/core/device/state/transitions.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/state/transitions_test.go b/rw_core/core/device/state/transitions_test.go
index 2c1775d..4e81ee2 100644
--- a/rw_core/core/device/state/transitions_test.go
+++ b/rw_core/core/device/state/transitions_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/transientstate/common.go b/rw_core/core/device/transientstate/common.go
index 920c567..0284d8d 100644
--- a/rw_core/core/device/transientstate/common.go
+++ b/rw_core/core/device/transientstate/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/device/transientstate/loader.go b/rw_core/core/device/transientstate/loader.go
index e953b91..86c6019 100644
--- a/rw_core/core/device/transientstate/loader.go
+++ b/rw_core/core/device/transientstate/loader.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/core/kv.go b/rw_core/core/kv.go
index 222ac29..9e97c86 100644
--- a/rw_core/core/kv.go
+++ b/rw_core/core/kv.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/flowdecomposition/common.go b/rw_core/flowdecomposition/common.go
index b777fcc..5e340cf 100644
--- a/rw_core/flowdecomposition/common.go
+++ b/rw_core/flowdecomposition/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/flowdecomposition/flow_decomposer.go b/rw_core/flowdecomposition/flow_decomposer.go
index 8518a3b..905d649 100644
--- a/rw_core/flowdecomposition/flow_decomposer.go
+++ b/rw_core/flowdecomposition/flow_decomposer.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/flowdecomposition/flow_decomposer_test.go b/rw_core/flowdecomposition/flow_decomposer_test.go
index b3e3a9f..8274c83 100644
--- a/rw_core/flowdecomposition/flow_decomposer_test.go
+++ b/rw_core/flowdecomposition/flow_decomposer_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/main.go b/rw_core/main.go
index 715285d..7268c56 100644
--- a/rw_core/main.go
+++ b/rw_core/main.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/mocks/adapter.go b/rw_core/mocks/adapter.go
index 5a9130e..7b8cbf2 100644
--- a/rw_core/mocks/adapter.go
+++ b/rw_core/mocks/adapter.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/mocks/adapter_olt.go b/rw_core/mocks/adapter_olt.go
index 3d4965c..8697dd1 100644
--- a/rw_core/mocks/adapter_olt.go
+++ b/rw_core/mocks/adapter_olt.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/mocks/adapter_onu.go b/rw_core/mocks/adapter_onu.go
index dcfc60c..06289b3 100644
--- a/rw_core/mocks/adapter_onu.go
+++ b/rw_core/mocks/adapter_onu.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/mocks/common.go b/rw_core/mocks/common.go
index 1de8a1f..cbf8b33 100644
--- a/rw_core/mocks/common.go
+++ b/rw_core/mocks/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/profile.go b/rw_core/profile.go
index e333278..8a2fefd 100644
--- a/rw_core/profile.go
+++ b/rw_core/profile.go
@@ -1,7 +1,7 @@
// +build profile
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/release.go b/rw_core/release.go
index 1fbfc2b..29032c0 100644
--- a/rw_core/release.go
+++ b/rw_core/release.go
@@ -1,7 +1,7 @@
// +build !profile
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/route/common.go b/rw_core/route/common.go
index 3bc0e5c..d59f130 100644
--- a/rw_core/route/common.go
+++ b/rw_core/route/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/route/device_route.go b/rw_core/route/device_route.go
index e57d2e6..4e7efb8 100644
--- a/rw_core/route/device_route.go
+++ b/rw_core/route/device_route.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 Open Networking Foundation (ONF) and the ONF Contributors
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/route/device_route_test.go b/rw_core/route/device_route_test.go
index 2ee4718..f641678 100644
--- a/rw_core/route/device_route_test.go
+++ b/rw_core/route/device_route_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/test/common.go b/rw_core/test/common.go
index 7f12841..66457ed 100644
--- a/rw_core/test/common.go
+++ b/rw_core/test/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/test/common_test.go b/rw_core/test/common_test.go
index d6cabda..b3de0c3 100644
--- a/rw_core/test/common_test.go
+++ b/rw_core/test/common_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/test/core_nbi_handler_multi_test.go b/rw_core/test/core_nbi_handler_multi_test.go
index f6a2ec7..88b00eb 100755
--- a/rw_core/test/core_nbi_handler_multi_test.go
+++ b/rw_core/test/core_nbi_handler_multi_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/test/utils.go b/rw_core/test/utils.go
index d86ecff..f2bf3e0 100644
--- a/rw_core/test/utils.go
+++ b/rw_core/test/utils.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/utils/common.go b/rw_core/utils/common.go
index 50581a5..9dcdc2f 100644
--- a/rw_core/utils/common.go
+++ b/rw_core/utils/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/utils/core_utils.go b/rw_core/utils/core_utils.go
index 0316ffa..7e1eb87 100644
--- a/rw_core/utils/core_utils.go
+++ b/rw_core/utils/core_utils.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/utils/core_utils_test.go b/rw_core/utils/core_utils_test.go
index e55b38c..9d5ed8e 100644
--- a/rw_core/utils/core_utils_test.go
+++ b/rw_core/utils/core_utils_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2019-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/utils/id.go b/rw_core/utils/id.go
index 3bf6d19..64501fe 100644
--- a/rw_core/utils/id.go
+++ b/rw_core/utils/id.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-2023 Open Networking Foundation (ONF) and the ONF Contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/utils/request_queue.go b/rw_core/utils/request_queue.go
index c96a6a0..68f3034 100644
--- a/rw_core/utils/request_queue.go
+++ b/rw_core/utils/request_queue.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/rw_core/utils/request_queue_test.go b/rw_core/utils/request_queue_test.go
index 007d375..e9f2ed7 100644
--- a/rw_core/utils/request_queue_test.go
+++ b/rw_core/utils/request_queue_test.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2023 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE
new file mode 100644
index 0000000..22080f7
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 0000000..7a6f820
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,79 @@
+package rendezvous
+
+type Rendezvous struct {
+ nodes map[string]int
+ nstr []string
+ nhash []uint64
+ hash Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+ r := &Rendezvous{
+ nodes: make(map[string]int, len(nodes)),
+ nstr: make([]string, len(nodes)),
+ nhash: make([]uint64, len(nodes)),
+ hash: hash,
+ }
+
+ for i, n := range nodes {
+ r.nodes[n] = i
+ r.nstr[i] = n
+ r.nhash[i] = hash(n)
+ }
+
+ return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+ // short-circuit if we're empty
+ if len(r.nodes) == 0 {
+ return ""
+ }
+
+ khash := r.hash(k)
+
+ var midx int
+ var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+ for i, nhash := range r.nhash[1:] {
+ if h := xorshiftMult64(khash ^ nhash); h > mhash {
+ midx = i + 1
+ mhash = h
+ }
+ }
+
+ return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+ r.nodes[node] = len(r.nstr)
+ r.nstr = append(r.nstr, node)
+ r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+ // find index of node to remove
+ nidx := r.nodes[node]
+
+ // remove from the slices
+ l := len(r.nstr)
+ r.nstr[nidx] = r.nstr[l]
+ r.nstr = r.nstr[:l]
+
+ r.nhash[nidx] = r.nhash[l]
+ r.nhash = r.nhash[:l]
+
+ // update the map
+ delete(r.nodes, node)
+ moved := r.nstr[nidx]
+ r.nodes[moved] = nidx
+}
+
+func xorshiftMult64(x uint64) uint64 {
+ x ^= x >> 12 // a
+ x ^= x << 25 // b
+ x ^= x >> 27 // c
+ return x * 2685821657736338717
+}
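
For context on the newly vendored dependency above: go-redis pulls in github.com/dgryski/go-rendezvous to pick ring shards via rendezvous (highest-random-weight) hashing. The snippet below is a minimal, hypothetical usage sketch of the API shown in rdv.go (New, Lookup, Add); it is not part of this change, and the hashString helper is an assumption built on the standard library's hash/fnv rather than anything from the vendored file.

```go
// Hypothetical sketch of how the vendored rendezvous package can be used.
// Only the New/Lookup/Add API from rdv.go above is assumed.
package main

import (
	"fmt"
	"hash/fnv"

	"github.com/dgryski/go-rendezvous"
)

// hashString adapts hash/fnv to the rendezvous.Hasher signature.
func hashString(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	// Build a table over three nodes; Lookup returns the node that wins the
	// highest-random-weight hash for a given key, so a key always maps to
	// the same node while the node set is unchanged.
	r := rendezvous.New([]string{"shard-0", "shard-1", "shard-2"}, hashString)

	fmt.Println(r.Lookup("device-1234"))
	fmt.Println(r.Lookup("device-5678"))

	// Adding a node only remaps the keys that now hash highest to it.
	r.Add("shard-3")
	fmt.Println(r.Lookup("device-1234"))
}
```
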
diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
new file mode 100644
index 0000000..b975a7b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.gitignore
@@ -0,0 +1,3 @@
+*.rdb
+testdata/*/
+.idea/
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
new file mode 100644
index 0000000..1e8d238
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -0,0 +1,21 @@
+run:
+ concurrency: 8
+ deadline: 5m
+ tests: false
+linters:
+ enable-all: true
+ disable:
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - goconst
+ - godox
+ - gosec
+ - maligned
+ - wsl
+ - gomnd
+ - goerr113
+ - exhaustive
+ - nestif
+ - nlreturn
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc
new file mode 100644
index 0000000..8b7f044
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.prettierrc
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml
new file mode 100644
index 0000000..1bf578d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.travis.yml
@@ -0,0 +1,20 @@
+dist: xenial
+language: go
+
+services:
+ - redis-server
+
+go:
+ - 1.14.x
+ - 1.15.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+go_import_path: github.com/go-redis/redis
+
+before_install:
+ - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
+ -b $(go env GOPATH)/bin v1.31.0
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
new file mode 100644
index 0000000..8392d54
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -0,0 +1,5 @@
+# Changelog
+
+> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+
+See https://redis.uptrace.dev/changelog/
diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE
new file mode 100644
index 0000000..298bed9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
new file mode 100644
index 0000000..49e4c96
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -0,0 +1,21 @@
+all: testdeps
+ go test ./...
+ go test ./... -short -race
+ go test ./... -run=NONE -bench=. -benchmem
+ env GOOS=linux GOARCH=386 go test ./...
+ go vet
+ golangci-lint run
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
new file mode 100644
index 0000000..da5d0fb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -0,0 +1,137 @@
+# Redis client for Golang
+
+[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
+
+> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+
+- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+- [Documentation](https://redis.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
+- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+
+## Ecosystem
+
+- [Distributed Locks](https://github.com/bsm/redislock).
+- [Redis Cache](https://github.com/go-redis/cache).
+- [Rate limiting](https://github.com/go-redis/redis_rate).
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling with
+ [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
+- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
+ [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
+- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
+- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
+- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
+ without using cluster mode and Redis Sentinel.
+- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
+
+## Installation
+
+go-redis supports the two most recent Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module first:
+
+```shell
+go mod init github.com/my/repo
+```
+
+Then install go-redis (note the _v8_ suffix in the import path; omitting it is a common mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+ "context"
+ "fmt"
+
+ "github.com/go-redis/redis/v8"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "localhost:6379",
+ Password: "", // no password set
+ DB: 0, // use default DB
+ })
+
+ err := rdb.Set(ctx, "key", "value", 0).Err()
+ if err != nil {
+ panic(err)
+ }
+
+ val, err := rdb.Get(ctx, "key").Result()
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("key", val)
+
+ val2, err := rdb.Get(ctx, "key2").Result()
+ if err == redis.Nil {
+ fmt.Println("key2 does not exist")
+ } else if err != nil {
+ panic(err)
+ } else {
+ fmt.Println("key2", val2)
+ }
+ // Output: key value
+ // key2 does not exist
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+ Min: "-inf",
+ Max: "+inf",
+ Offset: 0,
+ Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+ Keys: []string{"zset1", "zset2"},
+ Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## See also
+
+- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
+- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
+- [Golang msgpack](https://github.com/vmihailenco/msgpack)
+- [Golang message task queue](https://github.com/vmihailenco/taskq)
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
new file mode 100644
index 0000000..a6ce5c5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -0,0 +1,1697 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "net"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // NewClient creates a cluster node client with provided name and options.
+ NewClient func(opt *Options) *Client
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 3 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to a random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful to manually create a cluster of standalone Redis servers
+ // and load-balance read/write operations between masters and slaves.
+ // It can use a service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolSize applies per cluster node and not for the whole cluster.
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 3
+ }
+
+ if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.NumCPU()
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ if opt.MaxRetries == 0 {
+ opt.MaxRetries = -1
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+
+ if opt.NewClient == nil {
+ opt.NewClient = NewClient
+ }
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ const disableIdleCheck = -1
+
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: disableIdleCheck,
+
+ readOnly: opt.ReadOnly,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ failing uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: clOpt.NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+ const numProbe = 10
+ var dur uint64
+
+ for i := 0; i < numProbe; i++ {
+ time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+ start := time.Now()
+ n.Client.Ping(context.TODO())
+ dur += uint64(time.Since(start) / time.Microsecond)
+ }
+
+ latency := float64(dur) / float64(numProbe)
+ atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+ atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+ const timeout = 15 // 15 seconds
+
+ failing := atomic.LoadUint32(&n.failing)
+ if failing == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(failing) < timeout {
+ return true
+ }
+ atomic.StoreUint32(&n.failing, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ addrs []string
+ nodes map[string]*clusterNode
+ activeAddrs []string
+ closed bool
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ addrs: opt.Addrs,
+ nodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.nodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.nodes = nil
+ c.activeAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+ c.mu.RLock()
+ closed := c.closed
+ if !closed {
+ if len(c.activeAddrs) > 0 {
+ addrs = c.activeAddrs
+ } else {
+ addrs = c.addrs
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ //nolint:prealloc
+ var collected []*clusterNode
+
+ c.mu.Lock()
+
+ c.activeAddrs = c.activeAddrs[:0]
+ for addr, node := range c.nodes {
+ if node.Generation() >= generation {
+ c.activeAddrs = append(c.activeAddrs, addr)
+ if c.opt.RouteByLatency {
+ go node.updateLatency()
+ }
+ continue
+ }
+
+ delete(c.nodes, addr)
+ collected = append(collected, node)
+ }
+
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+ node, err := c.get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.nodes[addr]
+ if ok {
+ return node, nil
+ }
+
+ node = newClusterNode(c.opt, addr)
+
+ c.addrs = appendIfNotExists(c.addrs, addr)
+ c.nodes[addr] = node
+
+ return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.nodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.nodes))
+ for _, node := range c.nodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.Get(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use the origin host (which is not a loopback address) together with the node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Failing() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Failing() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Failing() {
+ continue
+ }
+ if node == nil || n.Latency() < node.Latency() {
+ node = n
+ }
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ // If all nodes are failing - return random node
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+ n := rand.Intn(len(nodes))
+ return nodes[n], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func(ctx context.Context) (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+ state, err := c.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload(ctx context.Context) {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload(ctx)
+ if err != nil {
+ return
+ }
+ time.Sleep(200 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+ v := c.state.Load()
+ if v != nil {
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload(ctx)
+ }
+ return state, nil
+ }
+ return c.Reload(ctx)
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+ state, err := c.Reload(ctx)
+ if err == nil {
+ return state, nil
+ }
+ return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder //nolint:structcheck
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ *clusterClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
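+//
+// A minimal usage sketch (illustrative only; the Addrs values are placeholders):
+//
+//	rdb := NewClusterClient(&ClusterOptions{
+//		Addrs: []string{":7000", ":7001", ":7002"},
+//	})
+//	err := rdb.Do(context.Background(), "set", "key", "value").Err()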
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ clusterClient: &clusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ },
+ ctx: context.Background(),
+ }
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+ c.cmdable = c.Process
+
+ if opt.IdleCheckFrequency > 0 {
+ go c.reaper(opt.IdleCheckFrequency)
+ }
+
+ return c
+}
+
+func (c *ClusterClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+// ReloadState reloads the cluster state. If available, it calls the ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+ c.state.LazyReload(ctx)
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ slot := c.cmdSlot(cmd)
+
+ var node *clusterNode
+ var ask bool
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ if node == nil {
+ var err error
+ node, err = c.cmdNode(ctx, cmdInfo, slot)
+ if err != nil {
+ return err
+ }
+ }
+
+ if ask {
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+ _ = pipe.Process(ctx, cmd)
+ _, lastErr = pipe.Exec(ctx)
+ _ = pipe.Close()
+ ask = false
+ } else {
+ lastErr = node.Client.Process(ctx, cmd)
+ }
+
+ // If there is no error - we are done.
+ if lastErr == nil {
+ return nil
+ }
+ if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload(ctx)
+ }
+ node = nil
+ continue
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && isLoadingError(lastErr) {
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = isMovedError(lastErr)
+ if moved || ask {
+ var err error
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ // First retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+ // Second try another node.
+ node.MarkAsFailing()
+ node = nil
+ continue
+ }
+
+ return lastErr
+ }
+ return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
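+//
+// Illustrative sketch, pinging every master (Ping comes from the node's *Client):
+//
+//	err := rdb.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+//		return master.Ping(ctx).Err()
+//	})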
+func (c *ClusterClient) ForEachMaster(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ state, err := c.state.ReloadOrGet(ctx)
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(ctx, node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get(context.TODO())
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ for _, idx := range rand.Perm(len(addrs)) {
+ addr := addrs[idx]
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots(ctx).Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ /*
+ * No node is connectable. It's possible that the IP addresses of all nodes have changed.
+ * Clear activeAddrs so the client can reconnect using the initial address settings
+ * (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), which may resolve the domain
+ * names again and pick up updated IP addresses.
+ */
+ c.nodes.mu.Lock()
+ c.nodes.activeAddrs = nil
+ c.nodes.mu.Unlock()
+
+ return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+ ticker := time.NewTicker(idleCheckFrequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ break
+ }
+
+ for _, node := range nodes {
+ _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
+ }
+ }
+ }
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+ err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return err
+ }
+
+ if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+ }
+
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ cmdsMap.Add(node, cmd)
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) _processPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ ctx context.Context,
+ node *clusterNode,
+ rd *proto.Reader,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+ continue
+ }
+
+ if c.opt.ReadOnly && isLoadingError(err) {
+ node.MarkAsFailing()
+ return err
+ }
+ if isRedisError(err) {
+ continue
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *ClusterClient) checkMovedErr(
+ ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := isMovedError(err)
+ if !moved && !ask {
+ return false
+ }
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return false
+ }
+
+ if moved {
+ c.state.LazyReload(ctx)
+ failedCmds.Add(node, cmd)
+ return true
+ }
+
+ if ask {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ return true
+ }
+
+ panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
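+//
+// Illustrative sketch using TxPipelined; the queued commands are sent in a single
+// MULTI/EXEC block:
+//
+//	cmds, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
+//		_ = pipe.Process(ctx, NewCmd(ctx, "incr", "counter"))
+//		_ = pipe.Process(ctx, NewCmd(ctx, "expire", "counter", 3600))
+//		return nil
+//	})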
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+ if err == nil {
+ return
+ }
+ if attempt < c.opt.MaxRedirects {
+ if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+ setCmdsErr(cmds, err)
+ }
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
+func (c *ClusterClient) _processTxPipelineNode(
+ ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return err
+ }
+
+ return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
+ if err != nil {
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
+ }
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ })
+ })
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+ ctx context.Context,
+ rd *proto.Reader,
+ statusCmd *StatusCmd,
+ cmds []Cmder,
+ failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+ continue
+ }
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ return fmt.Errorf("redis: expected '*', but got line %q", line)
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+ ctx context.Context, cmds []Cmder,
+ moved, ask bool,
+ addr string,
+ failedCmds *cmdsMap,
+) error {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+
+ if moved {
+ c.state.LazyReload(ctx)
+ for _, cmd := range cmds {
+ failedCmds.Add(node, cmd)
+ }
+ return nil
+ }
+
+ if ask {
+ for _, cmd := range cmds {
+ failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ err = node.Client.Watch(ctx, fn, keys...)
+ if err == nil {
+ break
+ }
+
+ moved, ask, addr := isMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.Get(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+ if isReadOnly {
+ c.state.LazyReload(ctx)
+ }
+ node, err = c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if shouldRetry(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
+
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(ctx, slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn(context.TODO())
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ // Try 3 random nodes.
+ const nodeLimit = 3
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+
+ perm := rand.Perm(len(addrs))
+ if len(perm) > nodeLimit {
+ perm = perm[:nodeLimit]
+ }
+
+ for _, idx := range perm {
+ addr := addrs[idx]
+
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ info, err := node.Client.Command(ctx).Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ if firstErr == nil {
+ panic("not reached")
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+ if err != nil {
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+ ctx context.Context,
+ cmdInfo *CommandInfo,
+ slot int,
+) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if (c.opt.RouteByLatency || c.opt.RouteRandomly) && cmdInfo != nil && cmdInfo.ReadOnly {
+ return c.slotReadOnlyNode(state, slot)
+ }
+ return state.slotMasterNode(slot)
+}
+
+func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+ if c.opt.RouteByLatency {
+ return state.slotClosestNode(slot)
+ }
+ if c.opt.RouteRandomly {
+ return state.slotRandomNode(slot)
+ }
+ return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return state.slotMasterNode(slot)
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+ m.mu.Lock()
+ m.m[node] = append(m.m[node], cmds...)
+ m.mu.Unlock()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
new file mode 100644
index 0000000..1f0bae0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -0,0 +1,25 @@
+package redis
+
+import (
+ "context"
+ "sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ return cmd
+ }
+ cmd.val = size
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
new file mode 100644
index 0000000..5dd5533
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -0,0 +1,2396 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+type Cmder interface {
+ Name() string
+ FullName() string
+ Args() []interface{}
+ String() string
+ stringArg(int) string
+ firstKeyPos() int8
+ setFirstKeyPos(int8)
+
+ readTimeout() *time.Duration
+ readReply(rd *proto.Reader) error
+
+ SetErr(error)
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.SetErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := writeCmd(wr, cmd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+ return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ if pos := cmd.firstKeyPos(); pos != 0 {
+ return int(pos)
+ }
+
+ switch cmd.Name() {
+ case "eval", "evalsha":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ case "memory":
+ // https://github.com/redis/redis/issues/7493
+ if cmd.stringArg(1) == "usage" {
+ return 2
+ }
+ }
+
+ if info != nil {
+ return int(info.FirstKeyPos)
+ }
+ return 0
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ b := make([]byte, 0, 64)
+
+ for i, arg := range cmd.Args() {
+ if i > 0 {
+ b = append(b, ' ')
+ }
+ b = internal.AppendArg(b, arg)
+ }
+
+ if err := cmd.Err(); err != nil {
+ b = append(b, ": "...)
+ b = append(b, err.Error()...)
+ } else if val != nil {
+ b = append(b, ": "...)
+ b = internal.AppendArg(b, val)
+ }
+
+ return internal.String(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ ctx context.Context
+ args []interface{}
+ err error
+ keyPos int8
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd.args) == 0 {
+ return ""
+ }
+ // Cmd name must be lower cased.
+ return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+ switch name := cmd.Name(); name {
+ case "cluster", "command":
+ if len(cmd.args) == 1 {
+ return name
+ }
+ if s2, ok := cmd.args[1].(string); ok {
+ return name + " " + s2
+ }
+ return name
+ default:
+ return name
+ }
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd.args) {
+ return ""
+ }
+ s, _ := cmd.args[pos].(string)
+ return s
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+ return cmd.keyPos
+}
+
+func (cmd *baseCmd) setFirstKeyPos(keyPos int8) {
+ cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+ cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *Cmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadReply(sliceParser)
+ return err
+}
+
+// sliceParser implements proto.MultiBulkParse.
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, n)
+ for i := 0; i < len(vals); i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err != nil {
+ if err == Nil {
+ vals[i] = nil
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ vals[i] = err
+ continue
+ }
+ return nil, err
+ }
+ vals[i] = v
+ }
+ return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(sliceParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+ return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadIntReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+ baseCmd
+
+ val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+ return &IntSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+ return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]int64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ num, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = num
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return err
+ }
+ switch n {
+ // -2 if the key does not exist
+ // -1 if the key exists but has no associated expire
+ case -2, -1:
+ cmd.val = time.Duration(n)
+ default:
+ cmd.val = time.Duration(n) * cmd.precision
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val = time.Unix(sec, microsec*1000)
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(nil)
+ // `SET key value NX` returns nil when key already exists. But
+ // `SETNX key value` returns bool (0/1). So convert nil to bool.
+ if err == Nil {
+ cmd.val = false
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ switch v := v.(type) {
+ case int64:
+ cmd.val = v == 1
+ return nil
+ case string:
+ cmd.val = v == "OK"
+ return nil
+ default:
+ return fmt.Errorf("got %T, wanted int64 or string", v)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+ if cmd.err != nil {
+ return time.Time{}, cmd.err
+ }
+ return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadString()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.val, err = rd.ReadFloatReply()
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]string, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.val[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.val[i] = s
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]bool, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[i] = n == 1
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = value
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
+ return &StringIntMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[key] = n
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ cmd.val[key] = struct{}{}
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+ var err error
+ cmd.val, err = readXMessageSlice(rd)
+ return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs := make([]XMessage, n)
+ for i := 0; i < n; i++ {
+ var err error
+ msgs[i], err = readXMessage(rd)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return XMessage{}, err
+ }
+ if n != 2 {
+ return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return XMessage{}, err
+ }
+
+ var values map[string]interface{}
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ if err != proto.Nil {
+ return XMessage{}, err
+ }
+ } else {
+ values = v.(map[string]interface{})
+ }
+
+ return XMessage{
+ ID: id,
+ Values: values,
+ }, nil
+}
+
+// stringInterfaceMapParser implements proto.MultiBulkParse.
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]interface{}, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XStream, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ stream, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, err := readXMessageSlice(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = XStream{
+ Stream: stream,
+ Messages: msgs,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ count, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ lower, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ higher, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = &XPending{
+ Count: count,
+ Lower: lower,
+ Higher: higher,
+ }
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.val.Consumers == nil {
+ cmd.val.Consumers = make(map[string]int64)
+ }
+ cmd.val.Consumers[consumerName] = consumerPending
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ ID string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]XPendingExt, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ idle, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ retryCount, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ cmd.val = append(cmd.val, XPendingExt{
+ ID: id,
+ Consumer: consumer,
+ Idle: time.Duration(idle) * time.Millisecond,
+ RetryCount: retryCount,
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+ baseCmd
+ val []XInfoGroup
+}
+
+type XInfoGroup struct {
+ Name string
+ Consumers int64
+ Pending int64
+ LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+ return &XInfoGroupsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "groups", stream},
+ },
+ }
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+ return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]XInfoGroup, n)
+
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = readXGroupInfo(rd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
+ var group XInfoGroup
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return group, err
+ }
+ if n != 8 {
+ return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
+ }
+
+ for i := 0; i < 4; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return group, err
+ }
+
+ val, err := rd.ReadString()
+ if err != nil {
+ return group, err
+ }
+
+ switch key {
+ case "name":
+ group.Name = val
+ case "consumers":
+ group.Consumers, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
+ case "pending":
+ group.Pending, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return group, err
+ }
+ case "last-delivered-id":
+ group.LastDeliveredID = val
+ default:
+ return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
+ }
+ }
+
+ return group, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+ baseCmd
+ val *XInfoStream
+}
+
+type XInfoStream struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ Groups int64
+ LastGeneratedID string
+ FirstEntry XMessage
+ LastEntry XMessage
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+ return &XInfoStreamCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "stream", stream},
+ },
+ }
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadReply(xStreamInfoParser)
+ if err != nil {
+ return err
+ }
+ cmd.val = v.(*XInfoStream)
+ return nil
+}
+
+func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 14 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
+ "wanted 14", n)
+ }
+ var info XInfoStream
+ for i := 0; i < 7; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ switch key {
+ case "length":
+ info.Length, err = rd.ReadIntReply()
+ case "radix-tree-keys":
+ info.RadixTreeKeys, err = rd.ReadIntReply()
+ case "radix-tree-nodes":
+ info.RadixTreeNodes, err = rd.ReadIntReply()
+ case "groups":
+ info.Groups, err = rd.ReadIntReply()
+ case "last-generated-id":
+ info.LastGeneratedID, err = rd.ReadString()
+ case "first-entry":
+ info.FirstEntry, err = readXMessage(rd)
+ case "last-entry":
+ info.LastEntry, err = readXMessage(rd)
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &info, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]Z, n/2)
+ for i := 0; i < len(cmd.val); i++ {
+ member, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ score, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = Z{
+ Member: member,
+ Score: score,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 3 {
+ return nil, fmt.Errorf("got %d elements, expected 3", n)
+ }
+
+ cmd.val = &ZWithKey{}
+ var err error
+
+ cmd.val.Key, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
+ cmd.page, cmd.cursor, err = rd.ReadScanReply()
+ return err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
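+
+// Illustrative sketch (not part of the upstream sources): the iterator wraps
+// repeated SCAN calls behind Next/Val. Assuming an initialized client rdb and
+// a context ctx:
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		_ = iter.Val() // consume the key
+//	}
+//	if err := iter.Err(); err != nil {
+//		// handle the error
+//	}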
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ ID string
+ Addr string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]ClusterSlot, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if n == 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].ID = id
+ }
+ }
+
+ cmd.val[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+}
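+
+// Illustrative usage (not part of the upstream sources): a 200 km radius query
+// returning coordinates and distances, assuming an initialized client rdb and
+// a geo set previously populated with GeoAdd:
+//
+//	locs, err := rdb.GeoRadius(ctx, "geo-set", 15.087269, 37.502669, &GeoRadiusQuery{
+//		Radius:    200,
+//		Unit:      "km",
+//		WithCoord: true,
+//		WithDist:  true,
+//		Sort:      "ASC",
+//	}).Result()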
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: geoLocationArgs(q, args...),
+ },
+ q: q,
+ }
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+ args = append(args, "store")
+ args = append(args, q.Store)
+ }
+ if q.StoreDist != "" {
+ args = append(args, "storedist")
+ args = append(args, q.StoreDist)
+ }
+ return args
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if err != nil {
+ return err
+ }
+ cmd.locations = v.([]GeoLocation)
+ return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case string:
+ locs = append(locs, GeoLocation{
+ Name: vv,
+ })
+ case *GeoLocation:
+ // TODO: avoid copying
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]*GeoPos, n)
+ for i := 0; i < len(cmd.val); i++ {
+ i := i
+ _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ longitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ latitude, err := rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val[i] = &GeoPos{
+ Longitude: longitude,
+ Latitude: latitude,
+ }
+ return nil, nil
+ })
+ if err != nil {
+ if err == Nil {
+ cmd.val[i] = nil
+ continue
+ }
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ ACLFlags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ cmd.val[vv.Name] = vv
+ }
+ return nil, nil
+ })
+ return err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ const numArgRedis5 = 6
+ const numArgRedis6 = 7
+
+ switch n {
+ case numArgRedis5, numArgRedis6:
+ // continue
+ default:
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
+ }
+
+ var cmd CommandInfo
+ var err error
+
+ cmd.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.Flags = make([]string, n)
+ for i := 0; i < len(cmd.Flags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.Flags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.Flags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ if n == numArgRedis5 {
+ return &cmd, nil
+ }
+
+ _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.ACLFlags = make([]string, n)
+ for i := 0; i < len(cmd.ACLFlags); i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ cmd.ACLFlags[i] = ""
+ case err != nil:
+ return nil, err
+ default:
+ cmd.ACLFlags[i] = s
+ }
+ }
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Extensions have cmd names in upper case. Convert them to lower case.
+ for k, v := range cmds {
+ lower := internal.ToLower(k)
+ if lower != k {
+ cmds[lower] = v
+ }
+ }
+
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+ ID int64
+ Time time.Time
+ Duration time.Duration
+ Args []string
+ // These are also optional fields emitted only by Redis 4.0 or greater:
+ // https://redis.io/commands/slowlog#output-format
+ ClientAddr string
+ ClientName string
+}
+
+type SlowLogCmd struct {
+ baseCmd
+
+ val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+ return &SlowLogCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+ return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]SlowLog, n)
+ for i := 0; i < len(cmd.val); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 4 {
+ err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
+ return nil, err
+ }
+
+ id, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ createdAt, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ createdAtTime := time.Unix(createdAt, 0)
+
+ costs, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ costsDuration := time.Duration(costs) * time.Microsecond
+
+ cmdLen, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if cmdLen < 1 {
+ err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+ return nil, err
+ }
+
+ cmdString := make([]string, cmdLen)
+ for i := 0; i < cmdLen; i++ {
+ cmdString[i], err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var address, name string
+ for i := 4; i < n; i++ {
+ str, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if i == 4 {
+ address = str
+ } else if i == 5 {
+ name = str
+ }
+ }
+
+ cmd.val[i] = SlowLog{
+ ID: id,
+ Time: createdAtTime,
+ Duration: costsDuration,
+ Args: cmdString,
+ ClientAddr: address,
+ ClientName: name,
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
new file mode 100644
index 0000000..79698ba
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -0,0 +1,2773 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+// KeepTTL is an option for Set command to keep key's existing TTL.
+// For example:
+//
+// rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+ dur, time.Millisecond,
+ )
+ return 1
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logger.Printf(
+ ctx,
+ "specified duration is %s, but minimal supported value is %s - truncating to 1s",
+ dur, time.Second,
+ )
+ return 1
+ }
+ return int64(dur / time.Second)
+}
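+
+// Illustrative examples (not part of the upstream sources) of how the two
+// helpers above round durations for the wire protocol:
+//
+//	formatMs(ctx, 500*time.Microsecond)  // 1    (below 1ms, rounded up with a warning)
+//	formatMs(ctx, 1500*time.Millisecond) // 1500
+//	formatSec(ctx, 500*time.Millisecond) // 1    (below 1s, rounded up with a warning)
+//	formatSec(ctx, 90*time.Second)       // 90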
+
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ return appendArg(dst, src[0])
+ }
+
+ dst = append(dst, src...)
+ return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+ switch arg := arg.(type) {
+ case []string:
+ for _, s := range arg {
+ dst = append(dst, s)
+ }
+ return dst
+ case []interface{}:
+ dst = append(dst, arg...)
+ return dst
+ case map[string]interface{}:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
+ default:
+ return append(dst, arg)
+ }
+}
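+
+// Illustrative examples (not part of the upstream sources) of the argument
+// flattening performed by appendArgs/appendArg:
+//
+//	appendArgs([]interface{}{"mset"}, []interface{}{[]string{"k1", "v1", "k2", "v2"}})
+//	// -> ["mset", "k1", "v1", "k2", "v2"]
+//	appendArgs([]interface{}{"mset"}, []interface{}{map[string]interface{}{"k1": "v1"}})
+//	// -> ["mset", "k1", "v1"]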
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command(ctx context.Context) *CommandsInfoCmd
+ ClientGetName(ctx context.Context) *StringCmd
+ Echo(ctx context.Context, message interface{}) *StringCmd
+ Ping(ctx context.Context) *StatusCmd
+ Quit(ctx context.Context) *StatusCmd
+ Del(ctx context.Context, keys ...string) *IntCmd
+ Unlink(ctx context.Context, keys ...string) *IntCmd
+ Dump(ctx context.Context, key string) *StringCmd
+ Exists(ctx context.Context, keys ...string) *IntCmd
+ Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ Keys(ctx context.Context, pattern string) *StringSliceCmd
+ Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+ Move(ctx context.Context, key string, db int) *BoolCmd
+ ObjectRefCount(ctx context.Context, key string) *IntCmd
+ ObjectEncoding(ctx context.Context, key string) *StringCmd
+ ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+ Persist(ctx context.Context, key string) *BoolCmd
+ PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ PTTL(ctx context.Context, key string) *DurationCmd
+ RandomKey(ctx context.Context) *StringCmd
+ Rename(ctx context.Context, key, newkey string) *StatusCmd
+ RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+ Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+ Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+ SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+ SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+ Touch(ctx context.Context, keys ...string) *IntCmd
+ TTL(ctx context.Context, key string) *DurationCmd
+ Type(ctx context.Context, key string) *StatusCmd
+ Append(ctx context.Context, key, value string) *IntCmd
+ Decr(ctx context.Context, key string) *IntCmd
+ DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+ Get(ctx context.Context, key string) *StringCmd
+ GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+ GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ Incr(ctx context.Context, key string) *IntCmd
+ IncrBy(ctx context.Context, key string, value int64) *IntCmd
+ IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+ MGet(ctx context.Context, keys ...string) *SliceCmd
+ MSet(ctx context.Context, values ...interface{}) *StatusCmd
+ MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+ Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+ StrLen(ctx context.Context, key string) *IntCmd
+
+ GetBit(ctx context.Context, key string, offset int64) *IntCmd
+ SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+ BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+ BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+ BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+ BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
+
+ Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+
+ HDel(ctx context.Context, key string, fields ...string) *IntCmd
+ HExists(ctx context.Context, key, field string) *BoolCmd
+ HGet(ctx context.Context, key, field string) *StringCmd
+ HGetAll(ctx context.Context, key string) *StringStringMapCmd
+ HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+ HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+ HKeys(ctx context.Context, key string) *StringSliceCmd
+ HLen(ctx context.Context, key string) *IntCmd
+ HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+ HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+ HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+ HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+ HVals(ctx context.Context, key string) *StringSliceCmd
+
+ BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+ LIndex(ctx context.Context, key string, index int64) *StringCmd
+ LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+ LLen(ctx context.Context, key string) *IntCmd
+ LPop(ctx context.Context, key string) *StringCmd
+ LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+ LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+ LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+ LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+ LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+ RPop(ctx context.Context, key string) *StringCmd
+ RPopLPush(ctx context.Context, source, destination string) *StringCmd
+ RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+ RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+
+ SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SCard(ctx context.Context, key string) *IntCmd
+ SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SInter(ctx context.Context, keys ...string) *StringSliceCmd
+ SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+ SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMembers(ctx context.Context, key string) *StringSliceCmd
+ SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+ SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+ SPop(ctx context.Context, key string) *StringCmd
+ SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRandMember(ctx context.Context, key string) *StringCmd
+ SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+ SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+ SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+ XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+ XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+ XLen(ctx context.Context, stream string) *IntCmd
+ XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+ XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+ XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+ XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+ XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+ XPending(ctx context.Context, stream, group string) *XPendingCmd
+ XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+ XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+ XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+
+ BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+ ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
+ ZCard(ctx context.Context, key string) *IntCmd
+ ZCount(ctx context.Context, key, min, max string) *IntCmd
+ ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+ ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+ ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRank(ctx context.Context, key, member string) *IntCmd
+ ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+ ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+ ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRevRank(ctx context.Context, key, member string) *IntCmd
+ ZScore(ctx context.Context, key, member string) *FloatCmd
+ ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+
+ PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+ PFCount(ctx context.Context, keys ...string) *IntCmd
+ PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+
+ BgRewriteAOF(ctx context.Context) *StatusCmd
+ BgSave(ctx context.Context) *StatusCmd
+ ClientKill(ctx context.Context, ipPort string) *StatusCmd
+ ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+ ClientList(ctx context.Context) *StringCmd
+ ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+ ClientID(ctx context.Context) *IntCmd
+ ConfigGet(ctx context.Context, parameter string) *SliceCmd
+ ConfigResetStat(ctx context.Context) *StatusCmd
+ ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+ ConfigRewrite(ctx context.Context) *StatusCmd
+ DBSize(ctx context.Context) *IntCmd
+ FlushAll(ctx context.Context) *StatusCmd
+ FlushAllAsync(ctx context.Context) *StatusCmd
+ FlushDB(ctx context.Context) *StatusCmd
+ FlushDBAsync(ctx context.Context) *StatusCmd
+ Info(ctx context.Context, section ...string) *StringCmd
+ LastSave(ctx context.Context) *IntCmd
+ Save(ctx context.Context) *StatusCmd
+ Shutdown(ctx context.Context) *StatusCmd
+ ShutdownSave(ctx context.Context) *StatusCmd
+ ShutdownNoSave(ctx context.Context) *StatusCmd
+ SlaveOf(ctx context.Context, host, port string) *StatusCmd
+ Time(ctx context.Context) *TimeCmd
+ DebugObject(ctx context.Context, key string) *StringCmd
+ ReadOnly(ctx context.Context) *StatusCmd
+ ReadWrite(ctx context.Context) *StatusCmd
+ MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptFlush(ctx context.Context) *StatusCmd
+ ScriptKill(ctx context.Context) *StatusCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+
+ Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+ PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+ PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+ PubSubNumPat(ctx context.Context) *IntCmd
+
+ ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+ ClusterNodes(ctx context.Context) *StringCmd
+ ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+ ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+ ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+ ClusterResetSoft(ctx context.Context) *StatusCmd
+ ClusterResetHard(ctx context.Context) *StatusCmd
+ ClusterInfo(ctx context.Context) *StringCmd
+ ClusterKeySlot(ctx context.Context, key string) *IntCmd
+ ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+ ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+ ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+ ClusterSaveConfig(ctx context.Context) *StatusCmd
+ ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+ ClusterFailover(ctx context.Context) *StatusCmd
+ ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+ ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+ GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+ GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+ GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+ GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(ctx context.Context, password string) *StatusCmd
+ AuthACL(ctx context.Context, username, password string) *StatusCmd
+ Select(ctx context.Context, index int) *StatusCmd
+ SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+ ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+ _ Cmdable = (*Client)(nil)
+ _ Cmdable = (*Tx)(nil)
+ _ Cmdable = (*Ring)(nil)
+ _ Cmdable = (*ClusterClient)(nil)
+)
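+
+// Illustrative note (not part of the upstream sources): because *Client, *Tx,
+// *Ring and *ClusterClient all satisfy Cmdable, application code can be written
+// against the interface rather than a concrete client type, e.g.
+//
+//	func fetch(ctx context.Context, c Cmdable, key string) (string, error) {
+//		return c.Get(ctx, key).Result()
+//	}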
+
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection as one of the users
+// defined in the ACL list when connecting to a Redis 6.0 (or greater) instance
+// that uses the Redis ACL system.
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "auth", username, password)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "select", index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "setname", name)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd(ctx, "command")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "getname")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "echo", message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "ping")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Quit(ctx context.Context) *StatusCmd {
+ panic("not implemented")
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "dump", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "keys", pattern)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "move", key, db)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "object", "refcount", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "object", "encoding", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "persist", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ ctx,
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "randomkey")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "rename", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "restore",
+ key,
+ formatMs(ctx, ttl),
+ value,
+ "replace",
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
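+
+// Illustrative usage (not part of the upstream sources): the Sort struct maps
+// onto the SORT command options, so, assuming an initialized client rdb,
+//
+//	rdb.Sort(ctx, "mylist", &Sort{Order: "ASC", Alpha: true})
+//
+// issues SORT mylist ASC ALPHA.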
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+ args := sort.args(key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(ctx, sort.args(key)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+ cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "type", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "append", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "decr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "decrby", key, decrement)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `GET key` command. It returns redis.Nil error when key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "get", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
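+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): callers typically check for redis.Nil to detect a missing key,
+// assuming a *redis.Client named rdb and a context.Context named ctx.
+//
+//	val, err := rdb.Get(ctx, "user:1").Result()
+//	switch {
+//	case err == redis.Nil:
+//		// key does not exist
+//	case err != nil:
+//		// transport or server error
+//	default:
+//		_ = val
+//	}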
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "getrange", key, start, end)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd(ctx, "getset", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "incr", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "incrby", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+// - MSet("key1", "value1", "key2", "value2")
+// - MSet([]string{"key1", "value1", "key2", "value2"})
+// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "mset"
+ args = appendArgs(args, values)
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
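+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): the three accepted MSet argument forms, assuming rdb and ctx as above.
+//
+//	err := rdb.MSet(ctx, "key1", "value1", "key2", "value2").Err()
+//	err = rdb.MSet(ctx, []string{"key1", "value1", "key2", "value2"}).Err()
+//	err = rdb.MSet(ctx, map[string]interface{}{"key1": "value1", "key2": "value2"}).Err()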
+
+// MSetNX is like SetNX but accepts multiple values:
+// - MSetNX("key1", "value1", "key2", "value2")
+// - MSetNX([]string{"key1", "value1", "key2", "value2"})
+// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(values))
+ args[0] = "msetnx"
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+// Use expiration for `SETEX`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 5)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
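+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): how the expiration argument maps onto SET options, assuming rdb
+// and ctx as above.
+//
+//	err := rdb.Set(ctx, "session:42", "payload", 10*time.Minute).Err() // SET ... EX 600
+//	err = rdb.Set(ctx, "session:42", "payload", redis.KeepTTL).Err()   // SET ... KEEPTTL
+//	err = rdb.Set(ctx, "session:42", "payload", 0).Err()               // no expiration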
+
+// Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd(ctx, "setnx", key, value)
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
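+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): SetNX is a common building block for simple locks; a zero
+// expiration falls back to the legacy SETNX command as noted above.
+//
+//	ok, err := rdb.SetNX(ctx, "lock:job", "owner-1", 30*time.Second).Result()
+//	if err == nil && ok {
+//		// lock acquired; it expires automatically after 30s
+//	}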
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ switch expiration {
+ case 0:
+ cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+ case KeepTTL:
+ cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+ default:
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+ } else {
+ cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+ }
+ }
+
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "strlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "getbit", key, offset)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+ return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+ return c.bitOp(ctx, "not", destKey, key)
+}
+
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
+ a := make([]interface{}, 0, 2+len(args))
+ a = append(a, "bitfield")
+ a = append(a, key)
+ a = append(a, args...)
+ cmd := NewIntSliceCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
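+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): SCAN is cursor based, so callers loop until the returned cursor
+// is zero, assuming rdb and ctx as above.
+//
+//	var cursor uint64
+//	for {
+//		keys, next, err := rdb.Scan(ctx, cursor, "session:*", 100).Result()
+//		if err != nil {
+//			break
+//		}
+//		_ = keys // process this batch
+//		if next == 0 {
+//			break
+//		}
+//		cursor = next
+//	}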
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hexists", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+ cmd := NewStringCmd(ctx, "hget", key, field)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hkeys", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "hlen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns an interface{} to distinguish between empty string and nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// HSet accepts values in following formats:
+// - HSet("myhash", "key1", "value1", "key2", "value2")
+// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that it requires Redis v4 for multiple field/value pairs support.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
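+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): the map form of HSet, assuming rdb and ctx as above. The returned
+// integer is the number of fields that were newly created.
+//
+//	added, err := rdb.HSet(ctx, "user:1", map[string]interface{}{
+//		"name": "alice",
+//		"age":  30,
+//	}).Result()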
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "hmset"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewBoolCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "hvals", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
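+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): BLPop blocks up to the given timeout and returns redis.Nil when
+// nothing arrived; the reply is a [key, value] pair. Assumes rdb and ctx.
+//
+//	kv, err := rdb.BLPop(ctx, 5*time.Second, "jobs").Result()
+//	if err == redis.Nil {
+//		// timed out, queue still empty
+//	} else if err == nil {
+//		_ = kv[1] // popped value; kv[0] is the key it came from
+//	}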
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(keys)+1] = formatSec(ctx, timeout)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ ctx,
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(ctx, timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+ cmd := NewStringCmd(ctx, "lindex", key, index)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "llen", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type LPosArgs struct {
+ Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+ args := []interface{}{"lpos", key, value}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+ args := []interface{}{"lpos", key, value, "count", count}
+ if a.Rank != 0 {
+ args = append(args, "rank", a.Rank)
+ }
+ if a.MaxLen != 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ cmd := NewIntSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ ctx,
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "lrem", key, count, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "lset", key, index, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ ctx,
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+ cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpushx"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "scard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "sismember", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd(ctx, "smembers", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "spop", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "spop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "srandmember", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that map will not preserve the order of key-value pairs.
+type XAddArgs struct {
+ Stream string
+ MaxLen int64 // MAXLEN N
+ MaxLenApprox int64 // MAXLEN ~ N
+ ID string
+ Values interface{}
+}
+
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xadd")
+ args = append(args, a.Stream)
+ if a.MaxLen > 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ } else if a.MaxLenApprox > 0 {
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ args = appendArg(args, a.Values)
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
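+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): appending to a stream with an approximate MAXLEN cap; the ID is
+// left empty so the server assigns one ("*"). Assumes rdb and ctx as above.
+//
+//	id, err := rdb.XAdd(ctx, &redis.XAddArgs{
+//		Stream:       "events",
+//		MaxLenApprox: 10000,
+//		Values:       map[string]interface{}{"type": "created", "object": "42"},
+//	}).Result()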
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xlen", stream)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 5+len(a.Streams))
+ args = append(args, "xread")
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.setFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
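+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): a blocking read from one stream starting after the newest entry;
+// Block >= 0 turns on BLOCK and widens the read timeout accordingly.
+// Assumes rdb and ctx as above.
+//
+//	streams, err := rdb.XRead(ctx, &redis.XReadArgs{
+//		Streams: []string{"events", "$"},
+//		Count:   10,
+//		Block:   5 * time.Second,
+//	}).Result()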
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+ return c.XRead(ctx, &XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+ Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 8+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+ keyPos := int8(1)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ keyPos += 2
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ keyPos += 2
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ keyPos++
+ }
+ args = append(args, "streams")
+ keyPos++
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(ctx, args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ cmd.setFirstKeyPos(keyPos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 4+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(ctx, timeout)
+ cmd := NewZWithKeyCmd(ctx, args...)
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewIntCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 2
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1] = "zadd", key
+ return c.zAdd(ctx, a, n, members...)
+}
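+
+// Usage sketch (editorial illustration, not part of the upstream go-redis
+// source): adding members with scores via the Z struct, assuming rdb and ctx.
+//
+//	added, err := rdb.ZAdd(ctx, "leaderboard",
+//		&redis.Z{Score: 42, Member: "alice"},
+//		&redis.Z{Score: 17, Member: "bob"},
+//	).Result()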
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "nx"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "xx"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+ return c.zAdd(ctx, a, n, members...)
+}
+
+func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewFloatCmd(ctx, a...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 3
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2] = "zadd", key, "incr"
+ return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+ return c.zIncr(ctx, a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+ return c.zIncr(ctx, a, n, member)
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcard", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zinterstore"
+ args[1] = destination
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(ctx, args...)
+ cmd.setFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []interface{}{
+ "zrange",
+ key,
+ start,
+ stop,
+ }
+ if withScores {
+ args = append(args, "withscores")
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(ctx, key, start, stop, false)
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ ctx,
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+ cmd := NewIntCmd(ctx, "zrevrank", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+ cmd := NewFloatCmd(ctx, "zscore", key, member)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+ args := make([]interface{}, 3+len(store.Keys))
+ args[0] = "zunionstore"
+ args[1] = dest
+ args[2] = len(store.Keys)
+ for i, key := range store.Keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+
+ cmd := NewIntCmd(ctx, args...)
+ cmd.setFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgrewriteaof")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "bgsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ClientKillByFilter is the new-style syntax, while ClientKill is the old one.
+//
+// CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "client"
+ args[1] = "kill"
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "client", "list")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+ cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "id")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+ cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "config", "get", parameter)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "resetstat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "config", "rewrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "dbsize")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushall", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "flushdb", "async")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
+ args := []interface{}{"info"}
+ if len(section) > 0 {
+ args = append(args, section[0])
+ }
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "lastsave")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "save")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+ var args []interface{}
+ if modifier == "" {
+ args = []interface{}{"shutdown"}
+ } else {
+ args = []interface{}{"shutdown", modifier}
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ if err := cmd.Err(); err != nil {
+ if err == io.EOF {
+ // Server quit as expected.
+ cmd.err = nil
+ }
+ } else {
+ // Server did not quit. String reply contains the reason.
+ cmd.err = errors.New(cmd.val)
+ cmd.val = ""
+ }
+ return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+ return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "slaveof", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+ cmd := NewSlowLogCmd(ctx, "slowlog", "get", num)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) Sync(ctx context.Context) {
+ panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+ cmd := NewTimeCmd(ctx, "time")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "debug", "object", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readonly")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "readwrite")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+ args := []interface{}{"memory", "usage", key}
+ if len(samples) > 0 {
+ if len(samples) != 1 {
+ panic("MemoryUsage expects single sample count")
+ }
+ args = append(args, "SAMPLES", samples[0])
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "eval"
+ cmdArgs[1] = script
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+ cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+ cmdArgs[0] = "evalsha"
+ cmdArgs[1] = sha1
+ cmdArgs[2] = len(keys)
+ for i, key := range keys {
+ cmdArgs[3+i] = key
+ }
+ cmdArgs = appendArgs(cmdArgs, args)
+ cmd := NewCmd(ctx, cmdArgs...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
+ }
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "kill")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+ cmd := NewIntCmd(ctx, "publish", channel, message)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+ args := []interface{}{"pubsub", "channels"}
+ if pattern != "*" {
+ args = append(args, pattern)
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
+ args := make([]interface{}, 2+len(channels))
+ args[0] = "pubsub"
+ args[1] = "numsub"
+ for i, channel := range channels {
+ args[2+i] = channel
+ }
+ cmd := NewStringIntMapCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+ cmd := NewIntCmd(ctx, "pubsub", "numpat")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+ cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "nodes")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "cluster", "info")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+ cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "delslots"
+ for i, slot := range slots {
+ args[2+i] = slot
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "cluster", "failover")
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+ args := make([]interface{}, 2+len(slots))
+ args[0] = "cluster"
+ args[1] = "addslots"
+ for i, num := range slots {
+ args[2+i] = num
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+ size := max - min + 1
+ slots := make([]int, size)
+ for i := 0; i < size; i++ {
+ slots[i] = min + i
+ }
+ return c.ClusterAddSlots(ctx, slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+ args := make([]interface{}, 2+3*len(geoLocation))
+ args[0] = "geoadd"
+ args[1] = key
+ for i, eachLoc := range geoLocation {
+ args[2+3*i] = eachLoc.Longitude
+ args[2+3*i+1] = eachLoc.Latitude
+ args[2+3*i+2] = eachLoc.Name
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+ ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+ cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+ if query.Store != "" || query.StoreDist != "" {
+ cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+ ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+ args := geoLocationArgs(query, "georadiusbymember", key, member)
+ cmd := NewIntCmd(ctx, args...)
+ if query.Store == "" && query.StoreDist == "" {
+ cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+ return cmd
+ }
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoDist(
+ ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+ if unit == "" {
+ unit = "km"
+ }
+ cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geohash"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "geopos"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewGeoPosCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go
new file mode 100644
index 0000000..5526253
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
new file mode 100644
index 0000000..9fe1376
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -0,0 +1,122 @@
+package redis
+
+import (
+ "context"
+ "io"
+ "net"
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+var ErrClosed = pool.ErrClosed
+
+type Error interface {
+ error
+
+ // RedisError is a no-op function but
+ // serves to distinguish types that are Redis
+ // errors from ordinary errors: a type is a
+ // Redis error if it has a RedisError method.
+ RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
+func shouldRetry(err error, retryTimeout bool) bool {
+ switch err {
+ case io.EOF, io.ErrUnexpectedEOF:
+ return true
+ case nil, context.Canceled, context.DeadlineExceeded:
+ return false
+ }
+
+ if v, ok := err.(timeoutError); ok {
+ if v.Timeout() {
+ return retryTimeout
+ }
+ return true
+ }
+
+ s := err.Error()
+ if s == "ERR max number of clients reached" {
+ return true
+ }
+ if strings.HasPrefix(s, "LOADING ") {
+ return true
+ }
+ if strings.HasPrefix(s, "READONLY ") {
+ return true
+ }
+ if strings.HasPrefix(s, "CLUSTERDOWN ") {
+ return true
+ }
+ if strings.HasPrefix(s, "TRYAGAIN ") {
+ return true
+ }
+
+ return false
+}
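+
+// For example, shouldRetry(io.EOF, false) reports true because the connection
+// can simply be re-dialed, while shouldRetry(context.Canceled, true) reports
+// false because the caller has already given up.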
+
+func isRedisError(err error) bool {
+ _, ok := err.(proto.RedisError)
+ return ok
+}
+
+func isBadConn(err error, allowTimeout bool) bool {
+ if err == nil {
+ return false
+ }
+
+ if isRedisError(err) {
+		// Close connections in the read-only state in case a domain address is used
+		// and the domain resolves to a different Redis server. See #790.
+ return isReadOnlyError(err)
+ }
+
+ if allowTimeout {
+ if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+ return !netErr.Temporary()
+ }
+ }
+
+ return true
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+ if !isRedisError(err) {
+ return
+ }
+
+ s := err.Error()
+ switch {
+ case strings.HasPrefix(s, "MOVED "):
+ moved = true
+ case strings.HasPrefix(s, "ASK "):
+ ask = true
+ default:
+ return
+ }
+
+ ind := strings.LastIndex(s, " ")
+ if ind == -1 {
+ return false, false, ""
+ }
+ addr = s[ind+1:]
+ return
+}
+
+func isLoadingError(err error) bool {
+ return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+ return strings.HasPrefix(err.Error(), "READONLY ")
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+ Timeout() bool
+}
diff --git a/vendor/github.com/go-redis/redis/v8/go.mod b/vendor/github.com/go-redis/redis/v8/go.mod
new file mode 100644
index 0000000..7be1b7f
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.mod
@@ -0,0 +1,11 @@
+module github.com/go-redis/redis/v8
+
+go 1.11
+
+require (
+ github.com/cespare/xxhash/v2 v2.1.1
+ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+ github.com/onsi/ginkgo v1.14.2
+ github.com/onsi/gomega v1.10.3
+ go.opentelemetry.io/otel v0.13.0
+)
diff --git a/vendor/github.com/go-redis/redis/v8/go.sum b/vendor/github.com/go-redis/redis/v8/go.sum
new file mode 100644
index 0000000..e1e8ab1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.sum
@@ -0,0 +1,82 @@
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+go.opentelemetry.io/otel v0.13.0 h1:2isEnyzjjJZq6r2EKMsFj4TxiQiexsM04AVhwbR/oBA=
+go.opentelemetry.io/otel v0.13.0/go.mod h1:dlSNewoRYikTkotEnxdmuBHgzT+k/idJSfDv/FxEnOY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go
new file mode 100644
index 0000000..b97fa0d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/arg.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+)
+
+func AppendArg(b []byte, v interface{}) []byte {
+ switch v := v.(type) {
+ case nil:
+ return append(b, "<nil>"...)
+ case string:
+ return appendUTF8String(b, Bytes(v))
+ case []byte:
+ return appendUTF8String(b, v)
+ case int:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int8:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int16:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int32:
+ return strconv.AppendInt(b, int64(v), 10)
+ case int64:
+ return strconv.AppendInt(b, v, 10)
+ case uint:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint8:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint16:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint32:
+ return strconv.AppendUint(b, uint64(v), 10)
+ case uint64:
+ return strconv.AppendUint(b, v, 10)
+ case float32:
+ return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
+ case float64:
+ return strconv.AppendFloat(b, v, 'f', -1, 64)
+ case bool:
+ if v {
+ return append(b, "true"...)
+ }
+ return append(b, "false"...)
+ case time.Time:
+ return v.AppendFormat(b, time.RFC3339Nano)
+ default:
+ return append(b, fmt.Sprint(v)...)
+ }
+}
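+
+// For example, AppendArg(nil, 42) appends "42", AppendArg(nil, true) appends
+// "true", and AppendArg(nil, time.Time{}) appends the RFC3339Nano form of the
+// zero time.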
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+ dst = append(dst, src...)
+ return dst
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
new file mode 100644
index 0000000..2fc74ad
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -0,0 +1,78 @@
+package hashtag
+
+import (
+ "strings"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+ 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+ 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+ 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+ 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+ 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+ 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+ 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+ 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+ 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+ 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+ 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+ 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+ 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+ 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+ 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+ 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+ 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+ 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+ 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+ 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+ 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+ 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+ if s := strings.IndexByte(key, '{'); s > -1 {
+ if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+ return key[s+1 : s+e+1]
+ }
+ }
+ return key
+}
+
+func RandomSlot() int {
+ return rand.Intn(slotNumber)
+}
+
+ // Slot returns a consistent slot number between 0 and 16383
+ // for any given string key.
+func Slot(key string) int {
+ if key == "" {
+ return RandomSlot()
+ }
+ key = Key(key)
+ return int(crc16sum(key)) % slotNumber
+}
+
+func crc16sum(key string) (crc uint16) {
+ for i := 0; i < len(key); i++ {
+ crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+ }
+ return
+}
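+
+// For example, Slot("{user1}.following") and Slot("{user1}.followers") both
+// hash the tag "user1" extracted by Key, so the two keys always land in the
+// same cluster slot.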
diff --git a/vendor/github.com/go-redis/redis/v8/internal/instruments.go b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
new file mode 100644
index 0000000..e837526
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
@@ -0,0 +1,33 @@
+package internal
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/api/global"
+ "go.opentelemetry.io/otel/api/metric"
+)
+
+var (
+ // WritesCounter is a count of write commands performed.
+ WritesCounter metric.Int64Counter
+ // NewConnectionsCounter is a count of new connections.
+ NewConnectionsCounter metric.Int64Counter
+)
+
+func init() {
+ defer func() {
+ if r := recover(); r != nil {
+			Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments: %v", r)
+ }
+ }()
+
+ meter := metric.Must(global.Meter("github.com/go-redis/redis"))
+
+ WritesCounter = meter.NewInt64Counter("redis.writes",
+ metric.WithDescription("the number of writes initiated"),
+ )
+
+ NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
+ metric.WithDescription("the number of connections created"),
+ )
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go
new file mode 100644
index 0000000..4a59c59
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/internal.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+ if retry < 0 {
+ panic("not reached")
+ }
+ if minBackoff == 0 {
+ return 0
+ }
+
+ d := minBackoff << uint(retry)
+ if d < minBackoff {
+ return maxBackoff
+ }
+
+ d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+ if d > maxBackoff || d < minBackoff {
+ d = maxBackoff
+ }
+
+ return d
+}
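+
+// For example, with minBackoff=8ms and maxBackoff=512ms the result for retry 0
+// falls in [8ms, 16ms) and for retry 3 in [8ms, 72ms); the returned duration
+// never exceeds maxBackoff.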
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
new file mode 100644
index 0000000..3810f9e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -0,0 +1,24 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+)
+
+type Logging interface {
+ Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+ log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+ _ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+var Logger Logging = &logger{
+ log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go
new file mode 100644
index 0000000..64f4627
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/once.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+ m sync.Mutex
+ done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once. In other words, given
+// var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error. A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once. Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+ if atomic.LoadUint32(&o.done) == 1 {
+ return nil
+ }
+ // Slow-path.
+ o.m.Lock()
+ defer o.m.Unlock()
+ var err error
+ if o.done == 0 {
+ err = f()
+ if err == nil {
+ atomic.StoreUint32(&o.done, 1)
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
new file mode 100644
index 0000000..08a2071
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -0,0 +1,136 @@
+package pool
+
+import (
+ "bufio"
+ "context"
+ "net"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+ usedAt int64 // atomic
+ netConn net.Conn
+
+ rd *proto.Reader
+ bw *bufio.Writer
+ wr *proto.Writer
+
+ Inited bool
+ pooled bool
+ createdAt time.Time
+}
+
+func NewConn(netConn net.Conn) *Conn {
+ cn := &Conn{
+ netConn: netConn,
+ createdAt: time.Now(),
+ }
+ cn.rd = proto.NewReader(netConn)
+ cn.bw = bufio.NewWriter(netConn)
+ cn.wr = proto.NewWriter(cn.bw)
+ cn.SetUsedAt(time.Now())
+ return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+ unix := atomic.LoadInt64(&cn.usedAt)
+ return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+ atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+ cn.netConn = netConn
+ cn.rd.Reset(netConn)
+ cn.bw.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+ return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+ if cn.netConn != nil {
+ return cn.netConn.RemoteAddr()
+ }
+ return nil
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+ return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+ if err := fn(cn.rd); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+ return nil
+ })
+}
+
+func (cn *Conn) WithWriter(
+ ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+ return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+
+ if cn.bw.Buffered() > 0 {
+ cn.bw.Reset(cn.netConn)
+ }
+
+ if err := fn(cn.wr); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+
+ if err := cn.bw.Flush(); err != nil {
+ return internal.RecordError(ctx, err)
+ }
+
+ internal.WritesCounter.Add(ctx, 1)
+
+ return nil
+ })
+}
+
+func (cn *Conn) Close() error {
+ return cn.netConn.Close()
+}
+
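+// deadline computes the effective I/O deadline: the earlier of now+timeout and
+// the context deadline, or the zero time when neither is set. It also stamps
+// the connection's UsedAt time.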
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+ tm := time.Now()
+ cn.SetUsedAt(tm)
+
+ if timeout > 0 {
+ tm = tm.Add(timeout)
+ }
+
+ if ctx != nil {
+ deadline, ok := ctx.Deadline()
+ if ok {
+ if timeout == 0 {
+ return deadline
+ }
+ if deadline.Before(tm) {
+ return deadline
+ }
+ return tm
+ }
+ }
+
+ if timeout > 0 {
+ return tm
+ }
+
+ return noDeadline
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
new file mode 100644
index 0000000..355742b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -0,0 +1,524 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+var (
+ ErrClosed = errors.New("redis: client is closed")
+ ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Hits uint32 // number of times free connection was found in the pool
+ Misses uint32 // number of times free connection was NOT found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // number of total connections in the pool
+ IdleConns uint32 // number of idle connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+ NewConn(context.Context) (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get(context.Context) (*Conn, error)
+ Put(context.Context, *Conn)
+ Remove(context.Context, *Conn, error)
+
+ Len() int
+ IdleLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func(context.Context) (net.Conn, error)
+ OnClose func(*Conn) error
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+type lastDialErrorWrap struct {
+ err error
+}
+
+type ConnPool struct {
+ opt *Options
+
+ dialErrorsNum uint32 // atomic
+
+ lastDialError atomic.Value
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+ poolSize int
+ idleConnsLen int
+
+ stats Stats
+
+ _closed uint32 // atomic
+ closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ opt: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ idleConns: make([]*Conn, 0, opt.PoolSize),
+ closedCh: make(chan struct{}),
+ }
+
+ p.connsMu.Lock()
+ p.checkMinIdleConns()
+ p.connsMu.Unlock()
+
+ if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+ go p.reaper(opt.IdleCheckFrequency)
+ }
+
+ return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+ if p.opt.MinIdleConns == 0 {
+ return
+ }
+ for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+ p.poolSize++
+ p.idleConnsLen++
+ go func() {
+ err := p.addIdleConn()
+ if err != nil {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+ }()
+ }
+}
+
+func (p *ConnPool) addIdleConn() error {
+ cn, err := p.dialConn(context.TODO(), true)
+ if err != nil {
+ return err
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ p.idleConns = append(p.idleConns, cn)
+ p.connsMu.Unlock()
+ return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+ cn, err := p.dialConn(ctx, pooled)
+ if err != nil {
+ return nil, err
+ }
+
+ p.connsMu.Lock()
+ p.conns = append(p.conns, cn)
+ if pooled {
+ // If pool is full remove the cn on next Put.
+ if p.poolSize >= p.opt.PoolSize {
+ cn.pooled = false
+ } else {
+ p.poolSize++
+ }
+ }
+ p.connsMu.Unlock()
+
+ return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+ return nil, p.getLastDialError()
+ }
+
+ netConn, err := p.opt.Dialer(ctx)
+ if err != nil {
+ p.setLastDialError(err)
+ if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+ go p.tryDial()
+ }
+ return nil, err
+ }
+
+ internal.NewConnectionsCounter.Add(ctx, 1)
+ cn := NewConn(netConn)
+ cn.pooled = pooled
+ return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+ for {
+ if p.closed() {
+ return
+ }
+
+ conn, err := p.opt.Dialer(context.Background())
+ if err != nil {
+ p.setLastDialError(err)
+ time.Sleep(time.Second)
+ continue
+ }
+
+ atomic.StoreUint32(&p.dialErrorsNum, 0)
+ _ = conn.Close()
+ return
+ }
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+ p.lastDialError.Store(&lastDialErrorWrap{err: err})
+}
+
+func (p *ConnPool) getLastDialError() error {
+ err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+ if err != nil {
+ return err.err
+ }
+ return nil
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+
+ err := p.waitTurn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ p.connsMu.Lock()
+ cn := p.popIdle()
+ p.connsMu.Unlock()
+
+ if cn == nil {
+ break
+ }
+
+ if p.isStaleConn(cn) {
+ _ = p.CloseConn(cn)
+ continue
+ }
+
+ atomic.AddUint32(&p.stats.Hits, 1)
+ return cn, nil
+ }
+
+ atomic.AddUint32(&p.stats.Misses, 1)
+
+ newcn, err := p.newConn(ctx, true)
+ if err != nil {
+ p.freeTurn()
+ return nil, err
+ }
+
+ return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+ p.queue <- struct{}{}
+}
+
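+// waitTurn reserves a slot in the pool's turn queue, returning ctx.Err() if the
+// context is done first or ErrPoolTimeout after PoolTimeout elapses.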
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ select {
+ case p.queue <- struct{}{}:
+ return nil
+ default:
+ }
+
+ timer := timers.Get().(*time.Timer)
+ timer.Reset(p.opt.PoolTimeout)
+
+ select {
+ case <-ctx.Done():
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return ctx.Err()
+ case p.queue <- struct{}{}:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timers.Put(timer)
+ return nil
+ case <-timer.C:
+ timers.Put(timer)
+ atomic.AddUint32(&p.stats.Timeouts, 1)
+ return ErrPoolTimeout
+ }
+}
+
+func (p *ConnPool) freeTurn() {
+ <-p.queue
+}
+
+func (p *ConnPool) popIdle() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ idx := len(p.idleConns) - 1
+ cn := p.idleConns[idx]
+ p.idleConns = p.idleConns[:idx]
+ p.idleConnsLen--
+ p.checkMinIdleConns()
+ return cn
+}
+
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
+ if cn.rd.Buffered() > 0 {
+ internal.Logger.Printf(ctx, "Conn has unread data")
+ p.Remove(ctx, cn, BadConnError{})
+ return
+ }
+
+ if !cn.pooled {
+ p.Remove(ctx, cn, nil)
+ return
+ }
+
+ p.connsMu.Lock()
+ p.idleConns = append(p.idleConns, cn)
+ p.idleConnsLen++
+ p.connsMu.Unlock()
+ p.freeTurn()
+}
+
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.removeConnWithLock(cn)
+ p.freeTurn()
+ _ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+ p.removeConnWithLock(cn)
+ return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+ p.connsMu.Lock()
+ p.removeConn(cn)
+ p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+ for i, c := range p.conns {
+ if c == cn {
+ p.conns = append(p.conns[:i], p.conns[i+1:]...)
+ if cn.pooled {
+ p.poolSize--
+ p.checkMinIdleConns()
+ }
+ return
+ }
+ }
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+ if p.opt.OnClose != nil {
+ _ = p.opt.OnClose(cn)
+ }
+ return cn.Close()
+}
+
+// Len returns total number of connections.
+func (p *ConnPool) Len() int {
+ p.connsMu.Lock()
+ n := len(p.conns)
+ p.connsMu.Unlock()
+ return n
+}
+
+// IdleLen returns number of idle connections.
+func (p *ConnPool) IdleLen() int {
+ p.connsMu.Lock()
+ n := p.idleConnsLen
+ p.connsMu.Unlock()
+ return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+ idleLen := p.IdleLen()
+ return &Stats{
+ Hits: atomic.LoadUint32(&p.stats.Hits),
+ Misses: atomic.LoadUint32(&p.stats.Misses),
+ Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+ TotalConns: uint32(p.Len()),
+ IdleConns: uint32(idleLen),
+ StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+ }
+}
+
+func (p *ConnPool) closed() bool {
+ return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ var firstErr error
+ for _, cn := range p.conns {
+ if fn(cn) {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ }
+ return firstErr
+}
+
+func (p *ConnPool) Close() error {
+ if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+ return ErrClosed
+ }
+ close(p.closedCh)
+
+ var firstErr error
+ p.connsMu.Lock()
+ for _, cn := range p.conns {
+ if err := p.closeConn(cn); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ p.conns = nil
+ p.poolSize = 0
+ p.idleConns = nil
+ p.idleConnsLen = 0
+ p.connsMu.Unlock()
+
+ return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+			// It is possible that the ticker and closedCh fire at the same time and
+			// select pseudo-randomly picks the ticker case; double-check here so the
+			// reaper does not keep running after the pool has been closed.
+ if p.closed() {
+ return
+ }
+ _, err := p.ReapStaleConns()
+ if err != nil {
+ internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
+ continue
+ }
+ case <-p.closedCh:
+ return
+ }
+ }
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+ var n int
+ for {
+ p.getTurn()
+
+ p.connsMu.Lock()
+ cn := p.reapStaleConn()
+ p.connsMu.Unlock()
+ p.freeTurn()
+
+ if cn != nil {
+ _ = p.closeConn(cn)
+ n++
+ } else {
+ break
+ }
+ }
+ atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+ return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+ if len(p.idleConns) == 0 {
+ return nil
+ }
+
+ cn := p.idleConns[0]
+ if !p.isStaleConn(cn) {
+ return nil
+ }
+
+ p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+ p.idleConnsLen--
+ p.removeConn(cn)
+
+ return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+ if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+ return false
+ }
+
+ now := time.Now()
+ if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+ return true
+ }
+ if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
new file mode 100644
index 0000000..5a3fde1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+ pool Pooler
+ cn *Conn
+ stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+ return &SingleConnPool{
+ pool: pool,
+ cn: cn,
+ }
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+ if p.stickyErr != nil {
+ return nil, p.stickyErr
+ }
+ return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ p.cn = nil
+ p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+ p.cn = nil
+ p.stickyErr = ErrClosed
+ return nil
+}
+
+func (p *SingleConnPool) Len() int {
+ return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+ return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
new file mode 100644
index 0000000..c3e7e7c
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -0,0 +1,202 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync/atomic"
+)
+
+const (
+ stateDefault = 0
+ stateInited = 1
+ stateClosed = 2
+)
+
+type BadConnError struct {
+ wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+ s := "redis: Conn is in a bad state"
+ if e.wrapped != nil {
+ s += ": " + e.wrapped.Error()
+ }
+ return s
+}
+
+func (e BadConnError) Unwrap() error {
+ return e.wrapped
+}
+
+//------------------------------------------------------------------------------
+
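+// StickyConnPool pins a single connection from the underlying pool and hands it
+// back on every Get, tracking its lifecycle through stateDefault, stateInited
+// and stateClosed.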
+type StickyConnPool struct {
+ pool Pooler
+ shared int32 // atomic
+
+ state uint32 // atomic
+ ch chan *Conn
+
+ _badConnError atomic.Value
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+ p, ok := pool.(*StickyConnPool)
+ if !ok {
+ p = &StickyConnPool{
+ pool: pool,
+ ch: make(chan *Conn, 1),
+ }
+ }
+ atomic.AddInt32(&p.shared, 1)
+ return p
+}
+
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
+ return p.pool.NewConn(ctx)
+}
+
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
+ return p.pool.CloseConn(cn)
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+	// In the worst case this races with Close, which is not a very common operation.
+ for i := 0; i < 1000; i++ {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ cn, err := p.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+ return cn, nil
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ case stateInited:
+ if err := p.badConnError(); err != nil {
+ return nil, err
+ }
+ cn, ok := <-p.ch
+ if !ok {
+ return nil, ErrClosed
+ }
+ return cn, nil
+ case stateClosed:
+ return nil, ErrClosed
+ default:
+ panic("not reached")
+ }
+ }
+ return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
+}
+
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
+ defer func() {
+ if recover() != nil {
+ p.freeConn(ctx, cn)
+ }
+ }()
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
+ if err := p.badConnError(); err != nil {
+ p.pool.Remove(ctx, cn, err)
+ } else {
+ p.pool.Put(ctx, cn)
+ }
+}
+
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+ defer func() {
+ if recover() != nil {
+ p.pool.Remove(ctx, cn, ErrClosed)
+ }
+ }()
+ p._badConnError.Store(BadConnError{wrapped: reason})
+ p.ch <- cn
+}
+
+func (p *StickyConnPool) Close() error {
+ if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
+ return nil
+ }
+
+ for i := 0; i < 1000; i++ {
+ state := atomic.LoadUint32(&p.state)
+ if state == stateClosed {
+ return ErrClosed
+ }
+ if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+ close(p.ch)
+ cn, ok := <-p.ch
+ if ok {
+ p.freeConn(context.TODO(), cn)
+ }
+ return nil
+ }
+ }
+
+ return errors.New("redis: StickyConnPool.Close: infinite loop")
+}
+
+func (p *StickyConnPool) Reset(ctx context.Context) error {
+ if p.badConnError() == nil {
+ return nil
+ }
+
+ select {
+ case cn, ok := <-p.ch:
+ if !ok {
+ return ErrClosed
+ }
+ p.pool.Remove(ctx, cn, ErrClosed)
+ p._badConnError.Store(BadConnError{wrapped: nil})
+ default:
+ return errors.New("redis: StickyConnPool does not have a Conn")
+ }
+
+ if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+ state := atomic.LoadUint32(&p.state)
+ return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
+ }
+
+ return nil
+}
+
+func (p *StickyConnPool) badConnError() error {
+ if v := p._badConnError.Load(); v != nil {
+ err := v.(BadConnError)
+ if err.wrapped != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *StickyConnPool) Len() int {
+ switch atomic.LoadUint32(&p.state) {
+ case stateDefault:
+ return 0
+ case stateInited:
+ return 1
+ case stateClosed:
+ return 0
+ default:
+ panic("not reached")
+ }
+}
+
+func (p *StickyConnPool) IdleLen() int {
+ return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+ return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
new file mode 100644
index 0000000..0fbc51e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -0,0 +1,331 @@
+package proto
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+const (
+ ErrorReply = '-'
+ StatusReply = '+'
+ IntReply = ':'
+ StringReply = '$'
+ ArrayReply = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil")
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+ rd *bufio.Reader
+ _buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+ return &Reader{
+ rd: bufio.NewReader(rd),
+ _buf: make([]byte, 64),
+ }
+}
+
+func (r *Reader) Buffered() int {
+ return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+ return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+ r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+ line, err := r.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if isNilReply(line) {
+ return nil, Nil
+ }
+ return line, nil
+}
+
+// readLine returns an error if:
+// - there is a pending read error;
+// - or line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+ b, err := r.rd.ReadSlice('\n')
+ if err != nil {
+ if err != bufio.ErrBufferFull {
+ return nil, err
+ }
+
+ full := make([]byte, len(b))
+ copy(full, b)
+
+ b, err = r.rd.ReadBytes('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ full = append(full, b...)
+ b = full
+ }
+ if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+ return nil, fmt.Errorf("redis: invalid reply: %q", b)
+ }
+ return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ case StringReply:
+ return r.readStringReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+ return nil, err
+ }
+ return m(r, n)
+ }
+ return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case IntReply:
+ return util.ParseInt(line[1:], 10, 64)
+ default:
+ return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadString() (string, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return "", err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return "", ParseErrorReply(line)
+ case StringReply:
+ return r.readStringReply(line)
+ case StatusReply:
+ return string(line[1:]), nil
+ case IntReply:
+ return string(line[1:]), nil
+ default:
+ return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+ }
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+ if isNilReply(line) {
+ return "", Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return "", err
+ }
+
+ b := make([]byte, replyLen+2)
+ _, err = io.ReadFull(r.rd, b)
+ if err != nil {
+ return "", err
+ }
+
+ return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return nil, err
+ }
+ return m(r, n)
+ default:
+ return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadArrayLen() (int, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return 0, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return 0, ParseErrorReply(line)
+ case ArrayReply:
+ n, err := parseArrayLen(line)
+ if err != nil {
+ return 0, err
+ }
+ return int(n), nil
+ default:
+ return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+ }
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+ n, err := r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != 2 {
+ return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+ }
+
+ cursor, err := r.ReadUint()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ n, err = r.ReadArrayLen()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ keys := make([]string, n)
+
+ for i := 0; i < n; i++ {
+ key, err := r.ReadString()
+ if err != nil {
+ return nil, 0, err
+ }
+ keys[i] = key
+ }
+
+ return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+ b, err := r.readTmpBytesReply()
+ if err != nil {
+ return 0, err
+ }
+ return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+ line, err := r.ReadLine()
+ if err != nil {
+ return nil, err
+ }
+ switch line[0] {
+ case ErrorReply:
+ return nil, ParseErrorReply(line)
+ case StringReply:
+ return r._readTmpBytesReply(line)
+ case StatusReply:
+ return line[1:], nil
+ default:
+ return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+ }
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+ if isNilReply(line) {
+ return nil, Nil
+ }
+
+ replyLen, err := util.Atoi(line[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ buf := r.buf(replyLen + 2)
+ _, err = io.ReadFull(r.rd, buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+ if n <= cap(r._buf) {
+ return r._buf[:n]
+ }
+ d := n - cap(r._buf)
+ r._buf = append(r._buf, make([]byte, d)...)
+ return r._buf
+}
+
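+// isNilReply reports whether line is a RESP nil marker: "$-1" (nil bulk string)
+// or "*-1" (nil array).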
+func isNilReply(b []byte) bool {
+ return len(b) == 3 &&
+ (b[0] == StringReply || b[0] == ArrayReply) &&
+ b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+ return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+ if isNilReply(line) {
+ return 0, Nil
+ }
+ return util.ParseInt(line[1:], 10, 64)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
new file mode 100644
index 0000000..08d18d3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -0,0 +1,173 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+// Scan parses bytes `b` into `v`, converting to the appropriate type.
+// nolint: gocyclo
+func Scan(b []byte, v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return fmt.Errorf("redis: Scan(nil)")
+ case *string:
+ *v = util.BytesToString(b)
+ return nil
+ case *[]byte:
+ *v = b
+ return nil
+ case *int:
+ var err error
+ *v, err = util.Atoi(b)
+ return err
+ case *int8:
+ n, err := util.ParseInt(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = int8(n)
+ return nil
+ case *int16:
+ n, err := util.ParseInt(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = int16(n)
+ return nil
+ case *int32:
+ n, err := util.ParseInt(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = int32(n)
+ return nil
+ case *int64:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *uint:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = uint(n)
+ return nil
+ case *uint8:
+ n, err := util.ParseUint(b, 10, 8)
+ if err != nil {
+ return err
+ }
+ *v = uint8(n)
+ return nil
+ case *uint16:
+ n, err := util.ParseUint(b, 10, 16)
+ if err != nil {
+ return err
+ }
+ *v = uint16(n)
+ return nil
+ case *uint32:
+ n, err := util.ParseUint(b, 10, 32)
+ if err != nil {
+ return err
+ }
+ *v = uint32(n)
+ return nil
+ case *uint64:
+ n, err := util.ParseUint(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = n
+ return nil
+ case *float32:
+ n, err := util.ParseFloat(b, 32)
+ if err != nil {
+ return err
+ }
+ *v = float32(n)
+ return err
+ case *float64:
+ var err error
+ *v, err = util.ParseFloat(b, 64)
+ return err
+ case *bool:
+ *v = len(b) == 1 && b[0] == '1'
+ return nil
+ case *time.Time:
+ var err error
+ *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+ return err
+ case encoding.BinaryUnmarshaler:
+ return v.UnmarshalBinary(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+ }
+}
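+
+// For example, Scan([]byte("42"), &i) with i declared as int stores 42 in i;
+// destination types that are not listed above must implement
+// encoding.BinaryUnmarshaler or Scan returns an error.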
+
+func ScanSlice(data []string, slice interface{}) error {
+ v := reflect.ValueOf(slice)
+ if !v.IsValid() {
+ return fmt.Errorf("redis: ScanSlice(nil)")
+ }
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+ }
+
+ next := makeSliceNextElemFunc(v)
+ for i, s := range data {
+ elem := next()
+ if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+ elemType := v.Type().Elem()
+
+ if elemType.Kind() == reflect.Ptr {
+ elemType = elemType.Elem()
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ elem := v.Index(v.Len() - 1)
+ if elem.IsNil() {
+ elem.Set(reflect.New(elemType))
+ }
+ return elem.Elem()
+ }
+
+ elem := reflect.New(elemType)
+ v.Set(reflect.Append(v, elem))
+ return elem.Elem()
+ }
+ }
+
+ zero := reflect.Zero(elemType)
+ return func() reflect.Value {
+ if v.Len() < v.Cap() {
+ v.Set(v.Slice(0, v.Len()+1))
+ return v.Index(v.Len() - 1)
+ }
+
+ v.Set(reflect.Append(v, zero))
+ return v.Index(v.Len() - 1)
+ }
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
new file mode 100644
index 0000000..81b09b8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -0,0 +1,153 @@
+package proto
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/util"
+)
+
+type writer interface {
+ io.Writer
+ io.ByteWriter
+ // io.StringWriter
+ WriteString(s string) (n int, err error)
+}
+
+type Writer struct {
+ writer
+
+ lenBuf []byte
+ numBuf []byte
+}
+
+func NewWriter(wr writer) *Writer {
+ return &Writer{
+ writer: wr,
+
+ lenBuf: make([]byte, 64),
+ numBuf: make([]byte, 64),
+ }
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+ if err := w.WriteByte(ArrayReply); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(args)); err != nil {
+ return err
+ }
+
+ for _, arg := range args {
+ if err := w.WriteArg(arg); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
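+
+// For example, WriteArgs([]interface{}{"set", "key", "value"}) writes the RESP
+// frame "*3\r\n$3\r\nset\r\n$3\r\nkey\r\n$5\r\nvalue\r\n".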
+
+func (w *Writer) writeLen(n int) error {
+ w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+ w.lenBuf = append(w.lenBuf, '\r', '\n')
+ _, err := w.Write(w.lenBuf)
+ return err
+}
+
+func (w *Writer) WriteArg(v interface{}) error {
+ switch v := v.(type) {
+ case nil:
+ return w.string("")
+ case string:
+ return w.string(v)
+ case []byte:
+ return w.bytes(v)
+ case int:
+ return w.int(int64(v))
+ case int8:
+ return w.int(int64(v))
+ case int16:
+ return w.int(int64(v))
+ case int32:
+ return w.int(int64(v))
+ case int64:
+ return w.int(v)
+ case uint:
+ return w.uint(uint64(v))
+ case uint8:
+ return w.uint(uint64(v))
+ case uint16:
+ return w.uint(uint64(v))
+ case uint32:
+ return w.uint(uint64(v))
+ case uint64:
+ return w.uint(v)
+ case float32:
+ return w.float(float64(v))
+ case float64:
+ return w.float(v)
+ case bool:
+ if v {
+ return w.int(1)
+ }
+ return w.int(0)
+ case time.Time:
+ w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+ return w.bytes(w.numBuf)
+ case encoding.BinaryMarshaler:
+ b, err := v.MarshalBinary()
+ if err != nil {
+ return err
+ }
+ return w.bytes(b)
+ default:
+ return fmt.Errorf(
+ "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+ }
+}
+
+func (w *Writer) bytes(b []byte) error {
+ if err := w.WriteByte(StringReply); err != nil {
+ return err
+ }
+
+ if err := w.writeLen(len(b)); err != nil {
+ return err
+ }
+
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+
+ return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+ return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+ w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+ w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+ w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+ return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+ if err := w.WriteByte('\r'); err != nil {
+ return err
+ }
+ return w.WriteByte('\n')
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
new file mode 100644
index 0000000..40676f3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -0,0 +1,45 @@
+package rand
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+ src rand.Source
+ mu sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+ s.mu.Lock()
+ n := s.src.Int63()
+ s.mu.Unlock()
+ return n
+}
+
+func (s *source) Seed(seed int64) {
+ s.mu.Lock()
+ s.src.Seed(seed)
+ s.mu.Unlock()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
new file mode 100644
index 0000000..862ff0e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package internal
+
+func String(b []byte) string {
+ return string(b)
+}
+
+func Bytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
new file mode 100644
index 0000000..4bc7970
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -0,0 +1,20 @@
+// +build !appengine
+
+package internal
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
new file mode 100644
index 0000000..894382b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -0,0 +1,81 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal/proto"
+ "github.com/go-redis/redis/v8/internal/util"
+ "go.opentelemetry.io/otel/api/global"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+ return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ })
+}
+
+func ToLower(s string) string {
+ if isLower(s) {
+ return s
+ }
+
+ b := make([]byte, len(s))
+ for i := range b {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ b[i] = c
+ }
+ return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c >= 'A' && c <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
+func Unwrap(err error) error {
+ u, ok := err.(interface {
+ Unwrap() error
+ })
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
+
+//------------------------------------------------------------------------------
+
+func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
+ if span := trace.SpanFromContext(ctx); !span.IsRecording() {
+ return fn(ctx, span)
+ }
+
+ ctx, span := global.Tracer("github.com/go-redis/redis").Start(ctx, name)
+ defer span.End()
+
+ return fn(ctx, span)
+}
+
+func RecordError(ctx context.Context, err error) error {
+ if err != proto.Nil {
+ trace.SpanFromContext(ctx).RecordError(ctx, err)
+ }
+ return err
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
new file mode 100644
index 0000000..1b3060e
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+ return string(b)
+}
+
+func StringToBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
new file mode 100644
index 0000000..db50338
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+ return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+ return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+ return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+ return strconv.ParseFloat(BytesToString(b), bitSize)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
new file mode 100644
index 0000000..c9868aa
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
@@ -0,0 +1,22 @@
+// +build !appengine
+
+package util
+
+import (
+ "unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
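
BytesToString and StringToBytes above avoid a copy by aliasing the original memory, which is why the safe.go variants exist for App Engine builds. A standalone sketch of the same zero-copy conversion and its main caveat (the util package is internal, so the helper is reproduced here for illustration only):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // bytesToString mirrors the conversion above: the returned string aliases
    // the backing array of b, so b must not be mutated while s is in use.
    func bytesToString(b []byte) string {
        return *(*string)(unsafe.Pointer(&b))
    }

    func main() {
        b := []byte("hello")
        s := bytesToString(b)
        fmt.Println(s) // hello

        b[0] = 'H'     // the mutation is visible through s; real code must avoid this
        fmt.Println(s) // Hello
    }
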
diff --git a/vendor/github.com/go-redis/redis/v8/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go
new file mode 100644
index 0000000..2f8bc2b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/iterator.go
@@ -0,0 +1,77 @@
+package redis
+
+import (
+ "context"
+ "sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+	mu  sync.Mutex // protects cmd and pos
+ cmd *ScanCmd
+ pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+ it.mu.Lock()
+ err := it.cmd.Err()
+ it.mu.Unlock()
+ return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next(ctx context.Context) bool {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ // Instantly return on errors.
+ if it.cmd.Err() != nil {
+ return false
+ }
+
+ // Advance cursor, check if we are still within range.
+ if it.pos < len(it.cmd.page) {
+ it.pos++
+ return true
+ }
+
+ for {
+ // Return if there is no more data to fetch.
+ if it.cmd.cursor == 0 {
+ return false
+ }
+
+ // Fetch next page.
+ switch it.cmd.args[0] {
+ case "scan", "qscan":
+ it.cmd.args[1] = it.cmd.cursor
+ default:
+ it.cmd.args[2] = it.cmd.cursor
+ }
+
+ err := it.cmd.process(ctx, it.cmd)
+ if err != nil {
+ return false
+ }
+
+ it.pos = 1
+
+ // Redis can occasionally return empty page.
+ if len(it.cmd.page) > 0 {
+ return true
+ }
+ }
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+ var v string
+ it.mu.Lock()
+ if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+ v = it.cmd.page[it.pos-1]
+ }
+ it.mu.Unlock()
+ return v
+}
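
ScanIterator is normally obtained from a ScanCmd rather than constructed directly. A hedged usage sketch, assuming the ScanCmd.Iterator helper defined elsewhere in the package and a Redis server on localhost:6379:

    package main

    import (
        "context"
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
        defer rdb.Close()

        // Scan returns a *ScanCmd; Iterator wraps it in the ScanIterator above,
        // fetching further SCAN pages on demand.
        iter := rdb.Scan(ctx, 0, "user:*", 0).Iterator()
        for iter.Next(ctx) {
            fmt.Println("key:", iter.Val())
        }
        if err := iter.Err(); err != nil {
            panic(err)
        }
    }
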
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
new file mode 100644
index 0000000..f2c16c5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -0,0 +1,317 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "go.opentelemetry.io/otel/api/trace"
+ "go.opentelemetry.io/otel/label"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+	// Allow returns nil if the operation is allowed or an error otherwise.
+	// If the operation is allowed, the client must call ReportResult with the
+	// result of the operation, whether it is a success or a failure.
+ Allow() error
+ // ReportResult reports the result of the previously allowed operation.
+ // nil indicates a success, non-nil error usually indicates a failure.
+ ReportResult(result error)
+}
+
+// Options keeps the settings to setup redis connection.
+type Options struct {
+ // The network type, either tcp or unix.
+ // Default is tcp.
+ Network string
+ // host:port address.
+ Addr string
+
+	// Dialer creates a new network connection and takes priority over
+	// the Network and Addr options.
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+ // Hook that is called when new connection is established.
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ // Use the specified Username to authenticate the current connection
+ // with one of the connections defined in the ACL list when connecting
+ // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+ Username string
+ // Optional password. Must match the password specified in the
+ // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+ // or the User Password when connecting to a Redis 6.0 instance, or greater,
+ // that is using the Redis ACL system.
+ Password string
+
+ // Database to be selected after connecting to the server.
+ DB int
+
+ // Maximum number of retries before giving up.
+ // Default is 3 retries.
+ MaxRetries int
+ // Minimum backoff between each retry.
+ // Default is 8 milliseconds; -1 disables backoff.
+ MinRetryBackoff time.Duration
+ // Maximum backoff between each retry.
+ // Default is 512 milliseconds; -1 disables backoff.
+ MaxRetryBackoff time.Duration
+
+ // Dial timeout for establishing new connections.
+ // Default is 5 seconds.
+ DialTimeout time.Duration
+ // Timeout for socket reads. If reached, commands will fail
+ // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+ // Default is 3 seconds.
+ ReadTimeout time.Duration
+ // Timeout for socket writes. If reached, commands will fail
+ // with a timeout instead of blocking.
+ // Default is ReadTimeout.
+ WriteTimeout time.Duration
+
+ // Maximum number of socket connections.
+	// Default is 10 connections per CPU as reported by runtime.NumCPU.
+ PoolSize int
+	// Minimum number of idle connections, which is useful when establishing
+	// new connections is slow.
+ MinIdleConns int
+ // Connection age at which client retires (closes) the connection.
+ // Default is to not close aged connections.
+ MaxConnAge time.Duration
+ // Amount of time client waits for connection if all connections
+ // are busy before returning an error.
+ // Default is ReadTimeout + 1 second.
+ PoolTimeout time.Duration
+ // Amount of time after which client closes idle connections.
+ // Should be less than server's timeout.
+ // Default is 5 minutes. -1 disables idle timeout check.
+ IdleTimeout time.Duration
+ // Frequency of idle checks made by idle connections reaper.
+ // Default is 1 minute. -1 disables idle connections reaper,
+ // but idle connections are still discarded by the client
+ // if IdleTimeout is set.
+ IdleCheckFrequency time.Duration
+
+ // Enables read only queries on slave nodes.
+ readOnly bool
+
+ // TLS Config to use. When set TLS will be negotiated.
+ TLSConfig *tls.Config
+
+	// Limiter interface used to implement a circuit breaker or rate limiter.
+ Limiter Limiter
+}
+
+func (opt *Options) init() {
+ if opt.Addr == "" {
+ opt.Addr = "localhost:6379"
+ }
+ if opt.Network == "" {
+ if strings.HasPrefix(opt.Addr, "/") {
+ opt.Network = "unix"
+ } else {
+ opt.Network = "tcp"
+ }
+ }
+ if opt.DialTimeout == 0 {
+ opt.DialTimeout = 5 * time.Second
+ }
+ if opt.Dialer == nil {
+ opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ netDialer := &net.Dialer{
+ Timeout: opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+ }
+ }
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 10 * runtime.NumCPU()
+ }
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+ if opt.PoolTimeout == 0 {
+ opt.PoolTimeout = opt.ReadTimeout + time.Second
+ }
+ if opt.IdleTimeout == 0 {
+ opt.IdleTimeout = 5 * time.Minute
+ }
+ if opt.IdleCheckFrequency == 0 {
+ opt.IdleCheckFrequency = time.Minute
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *Options) clone() *Options {
+ clone := *opt
+ return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// The scheme is required.
+// There are two connection types: by TCP socket and by Unix socket.
+// TCP connection:
+//		redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+func ParseURL(redisURL string) (*Options, error) {
+ u, err := url.Parse(redisURL)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "redis", "rediss":
+ return setupTCPConn(u)
+ case "unix":
+ return setupUnixConn(u)
+ default:
+ return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+ }
+}
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+ o := &Options{Network: "tcp"}
+
+ o.Username, o.Password = getUserPassword(u)
+
+ if len(u.Query()) > 0 {
+ return nil, errors.New("redis: no options supported")
+ }
+
+ h, p, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ h = u.Host
+ }
+ if h == "" {
+ h = "localhost"
+ }
+ if p == "" {
+ p = "6379"
+ }
+ o.Addr = net.JoinHostPort(h, p)
+
+ f := strings.FieldsFunc(u.Path, func(r rune) bool {
+ return r == '/'
+ })
+ switch len(f) {
+ case 0:
+ o.DB = 0
+ case 1:
+ if o.DB, err = strconv.Atoi(f[0]); err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+ }
+ default:
+ return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+ }
+
+ if u.Scheme == "rediss" {
+ o.TLSConfig = &tls.Config{ServerName: h}
+ }
+
+ return o, nil
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+ o := &Options{
+ Network: "unix",
+ }
+
+ if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+ return nil, errors.New("redis: empty unix socket path")
+ }
+ o.Addr = u.Path
+
+ o.Username, o.Password = getUserPassword(u)
+
+ dbStr := u.Query().Get("db")
+ if dbStr == "" {
+ return o, nil // if database is not set, connect to 0 db.
+ }
+
+ db, err := strconv.Atoi(dbStr)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %s", err)
+ }
+ o.DB = db
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ var conn net.Conn
+ err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
+ span.SetAttributes(
+ label.String("db.connection_string", opt.Addr),
+ )
+
+ var err error
+ conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
+ if err != nil {
+ _ = internal.RecordError(ctx, err)
+ }
+ return err
+ })
+ return conn, err
+ },
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ })
+}
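
A short usage sketch for ParseURL and NewClient, assuming a local Redis instance; the URL follows the TCP form documented above:

    package main

    import (
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        // user, password, host:port and database number 2.
        opt, err := redis.ParseURL("redis://user:secret@localhost:6379/2")
        if err != nil {
            panic(err)
        }
        fmt.Println(opt.Addr, opt.DB) // localhost:6379 2

        rdb := redis.NewClient(opt)
        defer rdb.Close()
    }
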
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
new file mode 100644
index 0000000..c6ec340
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -0,0 +1,137 @@
+package redis
+
+import (
+ "context"
+ "sync"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique to greatly speed up processing by packing
+// operations into batches, sending them at once to Redis and reading the
+// replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Pay attention that Pipeline is not a transaction, so you can get unexpected
+// results in case of big pipelines and small read/write timeouts.
+// The Redis client has retransmission logic in case of timeouts, so a pipeline
+// can be retransmitted and commands can be executed more than once.
+// To avoid this, it is a good idea to use reasonably large read/write timeouts
+// depending on your batch size and/or to use TxPipeline.
+type Pipeliner interface {
+ StatefulCmdable
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Close() error
+ Discard() error
+ Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+ cmdable
+ statefulCmdable
+
+ ctx context.Context
+ exec pipelineExecer
+
+ mu sync.Mutex
+ cmds []Cmder
+ closed bool
+}
+
+func (c *Pipeline) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+ c.mu.Lock()
+ c.cmds = append(c.cmds, cmd)
+ c.mu.Unlock()
+ return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+ c.mu.Lock()
+ _ = c.discard()
+ c.closed = true
+ c.mu.Unlock()
+ return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+ c.mu.Lock()
+ err := c.discard()
+ c.mu.Unlock()
+ return err
+}
+
+func (c *Pipeline) discard() error {
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.cmds = c.cmds[:0]
+ return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first failed
+// command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ if len(c.cmds) == 0 {
+ return nil, nil
+ }
+
+ cmds := c.cmds
+ c.cmds = nil
+
+ return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ if err := fn(c); err != nil {
+ return nil, err
+ }
+ cmds, err := c.Exec(ctx)
+ _ = c.Close()
+ return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+ return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+ return c
+}
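
A hedged usage sketch for the Pipeliner interface above, assuming the usual command helpers (Incr, Expire) defined elsewhere in the package; every queued command is flushed in a single round trip when Pipelined calls Exec:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
        defer rdb.Close()

        var incr *redis.IntCmd
        // Commands queued inside the closure are sent together; their results
        // are populated after Exec returns.
        _, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
            incr = pipe.Incr(ctx, "pipeline_counter")
            pipe.Expire(ctx, "pipeline_counter", time.Hour)
            return nil
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(incr.Val())
    }
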
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
new file mode 100644
index 0000000..c56270b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -0,0 +1,629 @@
+package redis
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+const (
+ pingTimeout = time.Second
+ chanSendTimeout = time.Minute
+)
+
+var errPingTimeout = errors.New("redis: ping timeout")
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to Redis Server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+ opt *Options
+
+ newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
+ closeConn func(*pool.Conn) error
+
+ mu sync.Mutex
+ cn *pool.Conn
+ channels map[string]struct{}
+ patterns map[string]struct{}
+
+ closed bool
+ exit chan struct{}
+
+ cmd *Cmd
+
+ chOnce sync.Once
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+}
+
+func (c *PubSub) String() string {
+ channels := mapKeys(c.channels)
+ channels = append(channels, mapKeys(c.patterns)...)
+ return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+ c.mu.Lock()
+ cn, err := c.conn(ctx, nil)
+ c.mu.Unlock()
+ return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+ if c.cn != nil {
+ return c.cn, nil
+ }
+
+ channels := mapKeys(c.channels)
+ channels = append(channels, newChannels...)
+
+ cn, err := c.newConn(ctx, channels)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.resubscribe(ctx, cn); err != nil {
+ _ = c.closeConn(cn)
+ return nil, err
+ }
+
+ c.cn = cn
+ return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+ return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+ var firstErr error
+
+ if len(c.channels) > 0 {
+ firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+ }
+
+ if len(c.patterns) > 0 {
+ err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+ s := make([]string, len(m))
+ i := 0
+ for k := range m {
+ s[i] = k
+ i++
+ }
+ return s
+}
+
+func (c *PubSub) _subscribe(
+ ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+ args := make([]interface{}, 0, 1+len(channels))
+ args = append(args, redisCmd)
+ for _, channel := range channels {
+ args = append(args, channel)
+ }
+ cmd := NewSliceCmd(ctx, args...)
+ return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+ ctx context.Context,
+ cn *pool.Conn,
+ err error,
+ allowTimeout bool,
+) {
+ c.mu.Lock()
+ c.releaseConn(ctx, cn, err, allowTimeout)
+ c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+ if c.cn != cn {
+ return
+ }
+ if isBadConn(err, allowTimeout) {
+ c.reconnect(ctx, err)
+ }
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+ _ = c.closeTheCn(reason)
+ _, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+ if c.cn == nil {
+ return nil
+ }
+ if !c.closed {
+ internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+ }
+ err := c.closeConn(c.cn)
+ c.cn = nil
+ return err
+}
+
+func (c *PubSub) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return pool.ErrClosed
+ }
+ c.closed = true
+ close(c.exit)
+
+ return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe the client to the specified channels. It returns an
+// empty subscription if there are no channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "subscribe", channels...)
+ if c.channels == nil {
+ c.channels = make(map[string]struct{})
+ }
+ for _, s := range channels {
+ c.channels[s] = struct{}{}
+ }
+ return err
+}
+
+// PSubscribe the client to the given patterns. It returns an
+// empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ err := c.subscribe(ctx, "psubscribe", patterns...)
+ if c.patterns == nil {
+ c.patterns = make(map[string]struct{})
+ }
+ for _, s := range patterns {
+ c.patterns[s] = struct{}{}
+ }
+ return err
+}
+
+// Unsubscribe the client from the given channels, or from all of
+// them if none is given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, channel := range channels {
+ delete(c.channels, channel)
+ }
+ err := c.subscribe(ctx, "unsubscribe", channels...)
+ return err
+}
+
+// PUnsubscribe the client from the given patterns, or from all of
+// them if none is given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for _, pattern := range patterns {
+ delete(c.patterns, pattern)
+ }
+ err := c.subscribe(ctx, "punsubscribe", patterns...)
+ return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+ cn, err := c.conn(ctx, channels)
+ if err != nil {
+ return err
+ }
+
+ err = c._subscribe(ctx, cn, redisCmd, channels)
+ c.releaseConn(ctx, cn, err, false)
+ return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+ args := []interface{}{"ping"}
+ if len(payload) == 1 {
+ args = append(args, payload[0])
+ }
+ cmd := NewCmd(ctx, args...)
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return err
+ }
+
+ err = c.writeCmd(ctx, cn, cmd)
+ c.releaseConnWithLock(ctx, cn, err, false)
+ return err
+}
+
+// Subscription received after a successful subscription to a channel.
+type Subscription struct {
+ // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+ Kind string
+ // Channel name we have subscribed to.
+ Channel string
+ // Number of channels we are currently subscribed to.
+ Count int
+}
+
+func (m *Subscription) String() string {
+ return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message received as result of a PUBLISH command issued by another client.
+type Message struct {
+ Channel string
+ Pattern string
+ Payload string
+ PayloadSlice []string
+}
+
+func (m *Message) String() string {
+ return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong received as result of a PING command issued by another client.
+type Pong struct {
+ Payload string
+}
+
+func (p *Pong) String() string {
+ if p.Payload != "" {
+ return fmt.Sprintf("Pong<%s>", p.Payload)
+ }
+ return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+ switch reply := reply.(type) {
+ case string:
+ return &Pong{
+ Payload: reply,
+ }, nil
+ case []interface{}:
+ switch kind := reply[0].(string); kind {
+ case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+ // Can be nil in case of "unsubscribe".
+ channel, _ := reply[1].(string)
+ return &Subscription{
+ Kind: kind,
+ Channel: channel,
+ Count: int(reply[2].(int64)),
+ }, nil
+ case "message":
+ switch payload := reply[2].(type) {
+ case string:
+ return &Message{
+ Channel: reply[1].(string),
+ Payload: payload,
+ }, nil
+ case []interface{}:
+ ss := make([]string, len(payload))
+ for i, s := range payload {
+ ss[i] = s.(string)
+ }
+ return &Message{
+ Channel: reply[1].(string),
+ PayloadSlice: ss,
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+ }
+ case "pmessage":
+ return &Message{
+ Pattern: reply[1].(string),
+ Channel: reply[2].(string),
+ Payload: reply[3].(string),
+ }, nil
+ case "pong":
+ return &Pong{
+ Payload: reply[1].(string),
+ }, nil
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+ }
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+ if c.cmd == nil {
+ c.cmd = NewCmd(ctx)
+ }
+
+ cn, err := c.connWithLock(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+ return c.cmd.readReply(rd)
+ })
+
+ c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+ return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or error ignoring Subscription and Pong
+// messages. This is a low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ return msg, nil
+ default:
+ err := fmt.Errorf("redis: unknown message: %T", msg)
+ return nil, err
+ }
+ }
+}
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is full for 30 seconds the message is dropped.
+// Receive* APIs can not be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping cannot be received for 30 seconds.
+func (c *PubSub) Channel() <-chan *Message {
+ return c.ChannelSize(100)
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.initPing()
+ c.initMsgChan(size)
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ if cap(c.msgCh) != size {
+ err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+ panic(err)
+ }
+ return c.msgCh
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.initPing()
+ c.initAllChan(size)
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ if cap(c.allCh) != size {
+ err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
+ panic(err)
+ }
+ return c.allCh
+}
+
+func (c *PubSub) getContext() context.Context {
+ if c.cmd != nil {
+ return c.cmd.ctx
+ }
+ return context.Background()
+}
+
+func (c *PubSub) initPing() {
+ ctx := context.TODO()
+ c.ping = make(chan struct{}, 1)
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ healthy := true
+ for {
+ timer.Reset(pingTimeout)
+ select {
+ case <-c.ping:
+ healthy = true
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ pingErr := c.Ping(ctx)
+ if healthy {
+ healthy = false
+ } else {
+ if pingErr == nil {
+ pingErr = errPingTimeout
+ }
+ c.mu.Lock()
+ c.reconnect(ctx, pingErr)
+ healthy = true
+ c.mu.Unlock()
+ }
+ case <-c.exit:
+ return
+ }
+ }
+ }()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *PubSub) initMsgChan(size int) {
+ ctx := context.TODO()
+ c.msgCh = make(chan *Message, size)
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.msgCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ // Ignore.
+ case *Pong:
+ // Ignore.
+ case *Message:
+ timer.Reset(chanSendTimeout)
+ select {
+ case c.msgCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ c.getContext(),
+ "redis: %s channel is full for %s (message is dropped)",
+ c,
+ chanSendTimeout,
+ )
+ }
+ default:
+ internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *PubSub) initAllChan(size int) {
+ ctx := context.TODO()
+ c.allCh = make(chan interface{}, size)
+ go func() {
+ timer := time.NewTimer(pingTimeout)
+ timer.Stop()
+
+ var errCount int
+ for {
+ msg, err := c.Receive(ctx)
+ if err != nil {
+ if err == pool.ErrClosed {
+ close(c.allCh)
+ return
+ }
+ if errCount > 0 {
+ time.Sleep(100 * time.Millisecond)
+ }
+ errCount++
+ continue
+ }
+
+ errCount = 0
+
+ // Any message is as good as a ping.
+ select {
+ case c.ping <- struct{}{}:
+ default:
+ }
+
+ switch msg := msg.(type) {
+ case *Subscription:
+ c.sendMessage(msg, timer)
+ case *Pong:
+ // Ignore.
+ case *Message:
+ c.sendMessage(msg, timer)
+ default:
+ internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+ }
+ }
+ }()
+}
+
+func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
+ timer.Reset(pingTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ c.getContext(),
+ "redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
+ }
+}
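
A hedged usage sketch for the PubSub type above, assuming a local Redis instance; Receive confirms the subscription before Channel takes over message delivery:

    package main

    import (
        "context"
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
        defer rdb.Close()

        sub := rdb.Subscribe(ctx, "mychannel")
        defer sub.Close()

        // Wait for the subscription confirmation before relying on delivery.
        if _, err := sub.Receive(ctx); err != nil {
            panic(err)
        }

        // Channel starts the ping/reconnect goroutines described above and
        // delivers messages until the PubSub is closed.
        for msg := range sub.Channel() {
            fmt.Println(msg.Channel, msg.Payload)
        }
    }
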
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
new file mode 100644
index 0000000..efad7f1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -0,0 +1,783 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+ "go.opentelemetry.io/otel/api/trace"
+ "go.opentelemetry.io/otel/label"
+)
+
+// Nil reply returned by Redis when key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger internal.Logging) {
+ internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+ BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+ AfterProcess(ctx context.Context, cmd Cmder) error
+
+ BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+ AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
+
+type hooks struct {
+ hooks []Hook
+}
+
+func (hs *hooks) lock() {
+ hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) clone() hooks {
+ clone := hs
+ clone.lock()
+ return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+ hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+ ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+ if len(hs.hooks) == 0 {
+ err := hs.withContext(ctx, func() error {
+ return fn(ctx, cmd)
+ })
+ cmd.SetErr(err)
+ return err
+ }
+
+ var hookIndex int
+ var retErr error
+
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+ if retErr != nil {
+ cmd.SetErr(retErr)
+ }
+ }
+
+ if retErr == nil {
+ retErr = hs.withContext(ctx, func() error {
+ return fn(ctx, cmd)
+ })
+ cmd.SetErr(retErr)
+ }
+
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+ retErr = err
+ cmd.SetErr(retErr)
+ }
+ }
+
+ return retErr
+}
+
+func (hs hooks) processPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ if len(hs.hooks) == 0 {
+ err := hs.withContext(ctx, func() error {
+ return fn(ctx, cmds)
+ })
+ return err
+ }
+
+ var hookIndex int
+ var retErr error
+
+ for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+ ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+ if retErr != nil {
+ setCmdsErr(cmds, retErr)
+ }
+ }
+
+ if retErr == nil {
+ retErr = hs.withContext(ctx, func() error {
+ return fn(ctx, cmds)
+ })
+ }
+
+ for hookIndex--; hookIndex >= 0; hookIndex-- {
+ if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+ retErr = err
+ setCmdsErr(cmds, retErr)
+ }
+ }
+
+ return retErr
+}
+
+func (hs hooks) processTxPipeline(
+ ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+ cmds = wrapMultiExec(ctx, cmds)
+ return hs.processPipeline(ctx, cmds, fn)
+}
+
+func (hs hooks) withContext(ctx context.Context, fn func() error) error {
+ done := ctx.Done()
+ if done == nil {
+ return fn()
+ }
+
+ errc := make(chan error, 1)
+ go func() { errc <- fn() }()
+
+ select {
+ case <-done:
+ return ctx.Err()
+ case err := <-errc:
+ return err
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+ opt *Options
+ connPool pool.Pooler
+
+ onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+ return &baseClient{
+ opt: opt,
+ connPool: connPool,
+ }
+}
+
+func (c *baseClient) clone() *baseClient {
+ clone := *c
+ return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+ opt := c.opt.clone()
+ opt.ReadTimeout = timeout
+ opt.WriteTimeout = timeout
+
+ clone := c.clone()
+ clone.opt = opt
+
+ return clone
+}
+
+func (c *baseClient) String() string {
+ return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.NewConn(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initConn(ctx, cn)
+ if err != nil {
+ _ = c.connPool.CloseConn(cn)
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+ if c.opt.Limiter != nil {
+ err := c.opt.Limiter.Allow()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ cn, err := c._getConn(ctx)
+ if err != nil {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+ cn, err := c.connPool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ if cn.Inited {
+ return cn, nil
+ }
+
+ err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
+ return c.initConn(ctx, cn)
+ })
+ if err != nil {
+ c.connPool.Remove(ctx, cn, err)
+ if err := internal.Unwrap(err); err != nil {
+ return nil, err
+ }
+ return nil, err
+ }
+
+ return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+ if cn.Inited {
+ return nil
+ }
+ cn.Inited = true
+
+ if c.opt.Password == "" &&
+ c.opt.DB == 0 &&
+ !c.opt.readOnly &&
+ c.opt.OnConnect == nil {
+ return nil
+ }
+
+ connPool := pool.NewSingleConnPool(c.connPool, cn)
+ conn := newConn(ctx, c.opt, connPool)
+
+ _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+ if c.opt.Password != "" {
+ if c.opt.Username != "" {
+ pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
+ } else {
+ pipe.Auth(ctx, c.opt.Password)
+ }
+ }
+
+ if c.opt.DB > 0 {
+ pipe.Select(ctx, c.opt.DB)
+ }
+
+ if c.opt.readOnly {
+ pipe.ReadOnly(ctx)
+ }
+
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if c.opt.OnConnect != nil {
+ return c.opt.OnConnect(ctx, conn)
+ }
+ return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+ if c.opt.Limiter != nil {
+ c.opt.Limiter.ReportResult(err)
+ }
+
+ if isBadConn(err, false) {
+ c.connPool.Remove(ctx, cn, err)
+ } else {
+ c.connPool.Put(ctx, cn)
+ }
+}
+
+func (c *baseClient) withConn(
+ ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+ return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
+
+ if span.IsRecording() {
+ if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
+ span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
+ }
+ }
+
+ defer func() {
+ c.releaseConn(ctx, cn, err)
+ }()
+
+ err = fn(ctx, cn)
+ return err
+ })
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ attempt := attempt
+
+ var retry bool
+ err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ retryTimeout := true
+ err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
+ return err
+ }
+
+ err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+ if err != nil {
+ retryTimeout = cmd.readTimeout() == nil
+ return err
+ }
+
+ return nil
+ })
+ if err == nil {
+ return nil
+ }
+ retry = shouldRetry(err, retryTimeout)
+ return err
+ })
+ if err == nil || !retry {
+ return err
+ }
+ lastErr = err
+ }
+ return lastErr
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+ if timeout := cmd.readTimeout(); timeout != nil {
+ t := *timeout
+ if t == 0 {
+ return 0
+ }
+ return t + 10*time.Second
+ }
+ return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+ var firstErr error
+ if c.onClose != nil {
+ if err := c.onClose(); err != nil {
+ firstErr = err
+ }
+ }
+ if err := c.connPool.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+ return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ err := c._generalProcessPipeline(ctx, cmds, p)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ var canRetry bool
+ lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ var err error
+ canRetry, err = p(ctx, cn, cmds)
+ return err
+ })
+ if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return pipelineReadCmds(rd, cmds)
+ })
+ return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ cmd.SetErr(err)
+ if err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+ ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmds(wr, cmds)
+ })
+ if err != nil {
+ return true, err
+ }
+
+ err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ statusCmd := cmds[0].(*StatusCmd)
+ // Trim multi and exec.
+ cmds = cmds[1 : len(cmds)-1]
+
+ err := txPipelineReadQueued(rd, statusCmd, cmds)
+ if err != nil {
+ return err
+ }
+
+ return pipelineReadCmds(rd, cmds)
+ })
+ return false, err
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+ if len(cmds) == 0 {
+ panic("not reached")
+ }
+ cmdCopy := make([]Cmder, len(cmds)+2)
+ cmdCopy[0] = NewStatusCmd(ctx, "multi")
+ copy(cmdCopy[1:], cmds)
+ cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+ return cmdCopy
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+ // Parse queued replies.
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for range cmds {
+ if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+ return err
+ }
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ return proto.ParseErrorReply(line)
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+ *baseClient
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+ opt.init()
+
+ c := Client{
+ baseClient: newBaseClient(opt, newConnPool(opt)),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+
+ return &c
+}
+
+func (c *Client) clone() *Client {
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+ clone := c.clone()
+ clone.baseClient = c.baseClient.withTimeout(timeout)
+ return clone
+}
+
+func (c *Client) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := c.clone()
+ clone.ctx = ctx
+ return clone
+}
+
+func (c *Client) Conn(ctx context.Context) *Conn {
+ return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+ return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+ stats := c.connPool.Stats()
+ return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Client) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+// sub := client.Subscribe(queryResp)
+// iface, err := sub.Receive()
+// if err != nil {
+// // handle error
+// }
+//
+// // Should be *Subscription, but others are possible if other actions have been
+// // taken on sub since it was created.
+// switch iface.(type) {
+// case *Subscription:
+// // subscribe succeeded
+// case *Message:
+// // received first message
+// case *Pong:
+// // pong received
+// default:
+// // handle error
+// }
+//
+// ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks // TODO: inherit hooks
+}
+
+// Conn is like Client, but its pool contains a single connection.
+type Conn struct {
+ *conn
+ ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+ c := Conn{
+ conn: &conn{
+ baseClient: baseClient{
+ opt: opt,
+ connPool: connPool,
+ },
+ },
+ ctx: ctx,
+ }
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+ return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
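
A minimal usage sketch for Client, assuming a local Redis instance and the Set/Get command helpers defined elsewhere in the package; note the Nil sentinel exported above for missing keys:

    package main

    import (
        "context"
        "fmt"

        "github.com/go-redis/redis/v8"
    )

    func main() {
        ctx := context.Background()
        rdb := redis.NewClient(&redis.Options{
            Addr:     "localhost:6379",
            Password: "", // no password set
            DB:       0,  // use the default DB
        })
        defer rdb.Close()

        if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
            panic(err)
        }

        val, err := rdb.Get(ctx, "missing").Result()
        switch {
        case err == redis.Nil:
            fmt.Println("key does not exist")
        case err != nil:
            panic(err)
        default:
            fmt.Println(val)
        }
    }
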
diff --git a/vendor/github.com/go-redis/redis/v8/renovate.json b/vendor/github.com/go-redis/redis/v8/renovate.json
new file mode 100644
index 0000000..f45d8f1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/renovate.json
@@ -0,0 +1,5 @@
+{
+ "extends": [
+ "config:base"
+ ]
+}
diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go
new file mode 100644
index 0000000..24cfd49
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/result.go
@@ -0,0 +1,180 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
+func NewCmdResult(val interface{}, err error) *Cmd {
+ var cmd Cmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+ var cmd SliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+ var cmd StatusCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+ var cmd IntCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+ var cmd DurationCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+ var cmd BoolCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+ var cmd StringCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+ var cmd FloatCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+ var cmd StringSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+ var cmd BoolSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+ var cmd StringStringMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+ var cmd StringIntMapCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+ var cmd TimeCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+ var cmd ZSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+ var cmd ZWithKeyCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+ var cmd ScanCmd
+ cmd.page = keys
+ cmd.cursor = cursor
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+ var cmd ClusterSlotsCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+ var cmd GeoLocationCmd
+ cmd.locations = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+ var cmd GeoPosCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+ var cmd CommandsInfoCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+ var cmd XMessageSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+ var cmd XStreamSliceCmd
+ cmd.val = val
+ cmd.SetErr(err)
+ return &cmd
+}
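
The constructors above exist so command results can be fabricated in unit tests without a running server. A hedged sketch of that pattern, assuming a narrow interface over the client in the code under test:

    package cache

    import (
        "context"
        "testing"

        "github.com/go-redis/redis/v8"
    )

    // getter is the narrow slice of the client the code under test depends on;
    // *redis.Client satisfies it as well.
    type getter interface {
        Get(ctx context.Context, key string) *redis.StringCmd
    }

    // fakeGetter stands in for a real client by returning a pre-built result
    // from the constructors above.
    type fakeGetter struct{}

    func (fakeGetter) Get(ctx context.Context, key string) *redis.StringCmd {
        return redis.NewStringResult("cached-value", nil)
    }

    func TestLookup(t *testing.T) {
        var g getter = fakeGetter{}

        val, err := g.Get(context.Background(), "k").Result()
        if err != nil || val != "cached-value" {
            t.Fatalf("got %q, %v", val, err)
        }
    }
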
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
new file mode 100644
index 0000000..34d05f3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -0,0 +1,731 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/dgryski/go-rendezvous"
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hashtag"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+ Get(string) string
+}
+
+type rendezvousWrapper struct {
+ *rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+ return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+ return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+ // Map of name => host:port addresses of ring shards.
+ Addrs map[string]string
+
+ // NewClient creates a shard client with provided name and options.
+ NewClient func(name string, opt *Options) *Client
+
+ // Frequency of PING commands sent to check shards availability.
+ // Shard is considered down after 3 subsequent failed checks.
+ HeartbeatFrequency time.Duration
+
+ // NewConsistentHash returns a consistent hash that is used
+ // to distribute keys across the shards.
+ //
+ // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+ // for consistent hashing algorithmic tradeoffs.
+ NewConsistentHash func(shards []string) ConsistentHash
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+ Limiter Limiter
+}
+
+func (opt *RingOptions) init() {
+ if opt.NewClient == nil {
+ opt.NewClient = func(name string, opt *Options) *Client {
+ return NewClient(opt)
+ }
+ }
+
+ if opt.HeartbeatFrequency == 0 {
+ opt.HeartbeatFrequency = 500 * time.Millisecond
+ }
+
+ if opt.NewConsistentHash == nil {
+ opt.NewConsistentHash = newRendezvous
+ }
+
+ if opt.MaxRetries == -1 {
+ opt.MaxRetries = 0
+ } else if opt.MaxRetries == 0 {
+ opt.MaxRetries = 3
+ }
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+ return &Options{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+ DB: opt.DB,
+
+ MaxRetries: -1,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+
+ TLSConfig: opt.TLSConfig,
+ Limiter: opt.Limiter,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+ Client *Client
+ down int32
+}
+
+func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+ clopt := opt.clientOptions()
+ clopt.Addr = addr
+
+ return &ringShard{
+ Client: opt.NewClient(name, clopt),
+ }
+}
+
+func (shard *ringShard) String() string {
+ var state string
+ if shard.IsUp() {
+ state = "up"
+ } else {
+ state = "down"
+ }
+ return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+ const threshold = 3
+ return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+ return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+ if up {
+ changed := shard.IsDown()
+ atomic.StoreInt32(&shard.down, 0)
+ return changed
+ }
+
+ if shard.IsDown() {
+ return false
+ }
+
+ atomic.AddInt32(&shard.down, 1)
+ return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+ opt *RingOptions
+
+ mu sync.RWMutex
+ hash ConsistentHash
+ shards map[string]*ringShard // read only
+ list []*ringShard // read only
+ numShard int
+ closed bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+ shards := make(map[string]*ringShard, len(opt.Addrs))
+ list := make([]*ringShard, 0, len(shards))
+
+ for name, addr := range opt.Addrs {
+ shard := newRingShard(opt, name, addr)
+ shards[name] = shard
+
+ list = append(list, shard)
+ }
+
+ c := &ringShards{
+ opt: opt,
+
+ shards: shards,
+ list: list,
+ }
+ c.rebalance()
+
+ return c
+}
+
+func (c *ringShards) List() []*ringShard {
+ var list []*ringShard
+
+ c.mu.RLock()
+ if !c.closed {
+ list = c.list
+ }
+ c.mu.RUnlock()
+
+ return list
+}
+
+func (c *ringShards) Hash(key string) string {
+ key = hashtag.Key(key)
+
+ var hash string
+
+ c.mu.RLock()
+ if c.numShard > 0 {
+ hash = c.hash.Get(key)
+ }
+ c.mu.RUnlock()
+
+ return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+ key = hashtag.Key(key)
+
+ c.mu.RLock()
+
+ if c.closed {
+ c.mu.RUnlock()
+ return nil, pool.ErrClosed
+ }
+
+ if c.numShard == 0 {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ hash := c.hash.Get(key)
+ if hash == "" {
+ c.mu.RUnlock()
+ return nil, errRingShardsDown
+ }
+
+ shard := c.shards[hash]
+ c.mu.RUnlock()
+
+ return shard, nil
+}
+
+func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+ if shardName == "" {
+ return c.Random()
+ }
+
+ c.mu.RLock()
+ shard := c.shards[shardName]
+ c.mu.RUnlock()
+ return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+ return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+ ticker := time.NewTicker(frequency)
+ defer ticker.Stop()
+
+ ctx := context.Background()
+ for range ticker.C {
+ var rebalance bool
+
+ for _, shard := range c.List() {
+ err := shard.Client.Ping(ctx).Err()
+ isUp := err == nil || err == pool.ErrPoolTimeout
+ if shard.Vote(isUp) {
+ internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
+ rebalance = true
+ }
+ }
+
+ if rebalance {
+ c.rebalance()
+ }
+ }
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *ringShards) rebalance() {
+ c.mu.RLock()
+ shards := c.shards
+ c.mu.RUnlock()
+
+ liveShards := make([]string, 0, len(shards))
+
+ for name, shard := range shards {
+ if shard.IsUp() {
+ liveShards = append(liveShards, name)
+ }
+ }
+
+ hash := c.opt.NewConsistentHash(liveShards)
+
+ c.mu.Lock()
+ c.hash = hash
+ c.numShard = len(liveShards)
+ c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+ c.mu.RLock()
+ l := c.numShard
+ c.mu.RUnlock()
+ return l
+}
+
+func (c *ringShards) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, shard := range c.shards {
+ if err := shard.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+ c.hash = nil
+ c.shards = nil
+ c.list = nil
+
+ return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+ opt *RingOptions
+ shards *ringShards
+ cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+ *ring
+ cmdable
+ hooks
+ ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+ opt.init()
+
+ ring := Ring{
+ ring: &ring{
+ opt: opt,
+ shards: newRingShards(opt),
+ },
+ ctx: context.Background(),
+ }
+
+ ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+ ring.cmdable = ring.Process
+
+ go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+ return &ring
+}
+
+func (c *Ring) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.cmdable = clone.Process
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+ cmd := NewCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+ return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+ shards := c.shards.List()
+ var acc PoolStats
+ for _, shard := range shards {
+ s := shard.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ }
+ return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+ return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ if len(channels) == 0 {
+ panic("at least one channel is required")
+ }
+
+ shard, err := c.shards.GetByKey(channels[0])
+ if err != nil {
+ // TODO: return PubSub with sticky error
+ panic(err)
+ }
+ return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// ForEachShard concurrently calls the fn on each live shard in the ring.
+// It returns the first error if any.
+func (c *Ring) ForEachShard(
+ ctx context.Context,
+ fn func(ctx context.Context, client *Client) error,
+) error {
+ shards := c.shards.List()
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, shard := range shards {
+ if shard.IsDown() {
+ continue
+ }
+
+ wg.Add(1)
+ go func(shard *ringShard) {
+ defer wg.Done()
+ err := fn(ctx, shard.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(shard)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+ shards := c.shards.List()
+ var firstErr error
+ for _, shard := range shards {
+ cmdsInfo, err := shard.Client.Command(ctx).Result()
+ if err == nil {
+ return cmdsInfo, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ if firstErr == nil {
+ return nil, errRingShardsDown
+ }
+ return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+ if err != nil {
+ return nil
+ }
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ pos := cmdFirstKeyPos(cmd, cmdInfo)
+ if pos == 0 {
+ return c.shards.Random()
+ }
+ firstKey := cmd.stringArg(pos)
+ return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+ var lastErr error
+ for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return err
+ }
+ }
+
+ shard, err := c.cmdShard(ctx, cmd)
+ if err != nil {
+ return err
+ }
+
+ lastErr = shard.Client.Process(ctx, cmd)
+ if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, false)
+ })
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: c.processTxPipeline,
+ }
+ pipe.init()
+ return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+ return c.generalProcessPipeline(ctx, cmds, true)
+ })
+}
+
+func (c *Ring) generalProcessPipeline(
+ ctx context.Context, cmds []Cmder, tx bool,
+) error {
+ cmdsMap := make(map[string][]Cmder)
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(ctx, cmd.Name())
+ hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+ if hash != "" {
+ hash = c.shards.Hash(hash)
+ }
+ cmdsMap[hash] = append(cmdsMap[hash], cmd)
+ }
+
+ var wg sync.WaitGroup
+ for hash, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(hash string, cmds []Cmder) {
+ defer wg.Done()
+
+ _ = c.processShardPipeline(ctx, hash, cmds, tx)
+ }(hash, cmds)
+ }
+
+ wg.Wait()
+ return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+ ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+ // TODO: retry?
+ shard, err := c.shards.GetByName(hash)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ if tx {
+ return shard.Client.processTxPipeline(ctx, cmds)
+ }
+ return shard.Client.processPipeline(ctx, cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ var shards []*ringShard
+ for _, key := range keys {
+ if key != "" {
+ shard, err := c.shards.GetByKey(hashtag.Key(key))
+ if err != nil {
+ return err
+ }
+
+ shards = append(shards, shard)
+ }
+ }
+
+ if len(shards) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one shard")
+ }
+
+ if len(shards) > 1 {
+ for _, shard := range shards[1:] {
+ if shard.Client != shards[0].Client {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+ return err
+ }
+ }
+ }
+
+ return shards[0].Client.Watch(ctx, fn, keys...)
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+ return c.shards.Close()
+}
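
For reference only, a minimal sketch of how the Ring client vendored above is typically constructed and used; the shard names and addresses are placeholders, not values used by this change.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	// Keys are distributed across the named shards via rendezvous hashing.
	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "127.0.0.1:7000", // placeholder addresses
			"shard2": "127.0.0.1:7001",
		},
	})
	defer rdb.Close()

	ctx := context.Background()
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("key =", val)
}
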
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
new file mode 100644
index 0000000..07ed482
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -0,0 +1,65 @@
+package redis
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+type scripter interface {
+ Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+ ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+ _ scripter = (*Client)(nil)
+ _ scripter = (*Ring)(nil)
+ _ scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+ src, hash string
+}
+
+func NewScript(src string) *Script {
+ h := sha1.New()
+ _, _ = io.WriteString(h, src)
+ return &Script{
+ src: src,
+ hash: hex.EncodeToString(h.Sum(nil)),
+ }
+}
+
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c scripter) *StringCmd {
+ return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c scripter) *BoolSliceCmd {
+ return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+ return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+ r := s.EvalSha(ctx, c, keys, args...)
+ if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+ return s.Eval(ctx, c, keys, args...)
+ }
+ return r
+}
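
For reference only, a sketch of how the Script helper above is commonly used; the Lua body, key, and address are illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy adds ARGV[1] to the integer stored at KEYS[1] and returns the new value.
var incrBy = redis.NewScript(`
local v = tonumber(redis.call("GET", KEYS[1]) or "0")
v = v + tonumber(ARGV[1])
redis.call("SET", KEYS[1], v)
return v
`)

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) // placeholder address
	defer rdb.Close()

	// Run tries EVALSHA first and falls back to EVAL on a NOSCRIPT error.
	n, err := incrBy.Run(context.Background(), rdb, []string{"counter"}, 5).Int64()
	fmt.Println(n, err)
}
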
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
new file mode 100644
index 0000000..7db9843
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -0,0 +1,731 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+ // The master name.
+ MasterName string
+ // A seed list of host:port addresses of sentinel nodes.
+ SentinelAddrs []string
+ // Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
+ SentinelPassword string
+
+ // Allows routing read-only commands to the closest master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+ // This option only works with NewFailoverClusterClient.
+ RouteRandomly bool
+
+ // Route all commands to slave read-only nodes.
+ SlaveOnly bool
+
+ // Following options are copied from Options struct.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ DB int
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+ return &Options{
+ Addr: "FailoverClient",
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: opt.DB,
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+ return &Options{
+ Addr: addr,
+
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ DB: 0,
+ Password: opt.SentinelPassword,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+ return &ClusterOptions{
+ Dialer: opt.Dialer,
+ OnConnect: opt.OnConnect,
+
+ Username: opt.Username,
+ Password: opt.Password,
+
+ MaxRedirects: opt.MaxRetries,
+
+ RouteByLatency: opt.RouteByLatency,
+ RouteRandomly: opt.RouteRandomly,
+
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+ if failoverOpt.RouteByLatency {
+ panic("to route commands by latency, use NewFailoverClusterClient")
+ }
+ if failoverOpt.RouteRandomly {
+ panic("to route commands randomly, use NewFailoverClusterClient")
+ }
+
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clientOptions()
+ opt.Dialer = masterSlaveDialer(failover)
+ opt.init()
+
+ connPool := newConnPool(opt)
+ failover.onFailover = func(ctx context.Context, addr string) {
+ _ = connPool.Filter(func(cn *pool.Conn) bool {
+ return cn.RemoteAddr().String() != addr
+ })
+ }
+
+ c := Client{
+ baseClient: newBaseClient(opt, connPool),
+ ctx: context.Background(),
+ }
+ c.cmdable = c.Process
+ c.onClose = failover.Close
+
+ return &c
+}
+
+func masterSlaveDialer(
+ failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
+ var addr string
+ var err error
+
+ if failover.opt.SlaveOnly {
+ addr, err = failover.RandomSlaveAddr(ctx)
+ } else {
+ addr, err = failover.MasterAddr(ctx)
+ if err == nil {
+ failover.trySwitchMaster(ctx, addr)
+ }
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ if failover.opt.Dialer != nil {
+ return failover.opt.Dialer(ctx, network, addr)
+ }
+ return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
+ }
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+ *baseClient
+ hooks
+ ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+ opt.init()
+ c := &SentinelClient{
+ baseClient: &baseClient{
+ opt: opt,
+ connPool: newConnPool(opt),
+ },
+ ctx: context.Background(),
+ }
+ return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+ return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+ pubsub := &PubSub{
+ opt: c.opt,
+
+ newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+ return c.newConn(ctx)
+ },
+ closeConn: c.connPool.CloseConn,
+ }
+ pubsub.init()
+ return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+ cmd := NewStringCmd(ctx, "ping")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(ctx, channels...)
+ }
+ return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Failover forces a failover as if the master were not reachable, without
+// asking other Sentinels for agreement.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+ cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "masters")
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
+ cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Set changes a configuration parameter of the specified master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Remove removes the specified master: the master will no longer be
+// monitored and will be removed entirely from the internal state of
+// the Sentinel.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+ cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+ opt *FailoverOptions
+
+ sentinelAddrs []string
+
+ onFailover func(ctx context.Context, addr string)
+ onUpdate func(ctx context.Context)
+
+ mu sync.RWMutex
+ _masterAddr string
+ sentinel *SentinelClient
+ pubsub *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.sentinel != nil {
+ return c.closeSentinel()
+ }
+ return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+ firstErr := c.pubsub.Close()
+ c.pubsub = nil
+
+ err := c.sentinel.Close()
+ if err != nil && firstErr == nil {
+ firstErr = err
+ }
+ c.sentinel = nil
+
+ return firstErr
+}
+
+func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+ addresses, err := c.slaveAddrs(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(addresses) == 0 {
+ return c.MasterAddr(ctx)
+ }
+ return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addr := c.getMasterAddr(ctx, sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addr := c.getMasterAddr(ctx, c.sentinel)
+ if addr != "" {
+ return addr, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+ return addr, nil
+ }
+
+ return "", errors.New("redis: all sentinels are unreachable")
+}
+
+func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
+ c.mu.RLock()
+ sentinel := c.sentinel
+ c.mu.RUnlock()
+
+ if sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.sentinel != nil {
+ addrs := c.getSlaveAddrs(ctx, c.sentinel)
+ if len(addrs) > 0 {
+ return addrs, nil
+ }
+ _ = c.closeSentinel()
+ }
+
+ for i, sentinelAddr := range c.sentinelAddrs {
+ sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+ slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
+ c.opt.MasterName, err)
+ _ = sentinel.Close()
+ continue
+ }
+
+ // Push working sentinel to the top.
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+ c.setSentinel(ctx, sentinel)
+
+ addrs := parseSlaveAddrs(slaves)
+ return addrs, nil
+ }
+
+ return []string{}, errors.New("redis: all sentinels are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+ addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+ c.opt.MasterName, err)
+ return ""
+ }
+ return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
+ addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+ c.opt.MasterName, err)
+ return []string{}
+ }
+ return parseSlaveAddrs(addrs)
+}
+
+func parseSlaveAddrs(addrs []interface{}) []string {
+ nodes := make([]string, 0, len(addrs))
+
+ for _, node := range addrs {
+ ip := ""
+ port := ""
+ flags := []string{}
+ lastkey := ""
+ isDown := false
+
+ for _, key := range node.([]interface{}) {
+ switch lastkey {
+ case "ip":
+ ip = key.(string)
+ case "port":
+ port = key.(string)
+ case "flags":
+ flags = strings.Split(key.(string), ",")
+ }
+ lastkey = key.(string)
+ }
+
+ for _, flag := range flags {
+ switch flag {
+ case "s_down", "o_down", "disconnected":
+ isDown = true
+ }
+ }
+
+ if !isDown {
+ nodes = append(nodes, net.JoinHostPort(ip, port))
+ }
+ }
+
+ return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+ c.mu.RLock()
+ currentAddr := c._masterAddr
+ c.mu.RUnlock()
+
+ if addr == currentAddr {
+ return
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if addr == c._masterAddr {
+ return
+ }
+ c._masterAddr = addr
+
+ internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+ c.opt.MasterName, addr)
+ if c.onFailover != nil {
+ c.onFailover(ctx, addr)
+ }
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+ if c.sentinel != nil {
+ panic("not reached")
+ }
+ c.sentinel = sentinel
+ c.discoverSentinels(ctx)
+
+ c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+ go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+ sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+ if err != nil {
+ internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+ return
+ }
+ for _, sentinel := range sentinels {
+ vals := sentinel.([]interface{})
+ for i := 0; i < len(vals); i += 2 {
+ key := vals[i].(string)
+ if key == "name" {
+ sentinelAddr := vals[i+1].(string)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+ }
+ }
+ }
+ }
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+ ctx := context.TODO()
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+
+ ch := pubsub.Channel()
+ for msg := range ch {
+ if msg.Channel == "+switch-master" {
+ parts := strings.Split(msg.Payload, " ")
+ if parts[0] != c.opt.MasterName {
+ internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+ continue
+ }
+ addr := net.JoinHostPort(parts[3], parts[4])
+ c.trySwitchMaster(pubsub.getContext(), addr)
+ }
+
+ if c.onUpdate != nil {
+ c.onUpdate(ctx)
+ }
+ }
+}
+
+func contains(slice []string, str string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a slave node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+ sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+ copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+ failover := &sentinelFailover{
+ opt: failoverOpt,
+ sentinelAddrs: sentinelAddrs,
+ }
+
+ opt := failoverOpt.clusterOptions()
+ opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+ masterAddr, err := failover.MasterAddr(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := []ClusterNode{{
+ Addr: masterAddr,
+ }}
+
+ slaveAddrs, err := failover.slaveAddrs(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, slaveAddr := range slaveAddrs {
+ nodes = append(nodes, ClusterNode{
+ Addr: slaveAddr,
+ })
+ }
+
+ slots := []ClusterSlot{
+ {
+ Start: 0,
+ End: 16383,
+ Nodes: nodes,
+ },
+ }
+ return slots, nil
+ }
+
+ c := NewClusterClient(opt)
+ failover.onUpdate = func(ctx context.Context) {
+ c.ReloadState(ctx)
+ }
+
+ return c
+}
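
For reference only, a sketch of constructing a sentinel-backed failover client with the options above; the master-set name and sentinel addresses are placeholders.

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

func main() {
	// The client asks the sentinels for the current master of "mymaster" and
	// drops stale pool connections after a +switch-master event.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster", // placeholder master-set name
		SentinelAddrs: []string{"sentinel-0:26379", "sentinel-1:26379"},
	})
	defer rdb.Close()

	if err := rdb.Ping(context.Background()).Err(); err != nil {
		// all sentinels unreachable, or no master known for MasterName
		panic(err)
	}
}
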
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
new file mode 100644
index 0000000..ad825c6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -0,0 +1,151 @@
+package redis
+
+import (
+ "context"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+ "github.com/go-redis/redis/v8/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction has failed.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+// If you don't need WATCH it is better to use Pipeline.
+type Tx struct {
+ baseClient
+ cmdable
+ statefulCmdable
+ hooks
+ ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+ tx := Tx{
+ baseClient: baseClient{
+ opt: c.opt,
+ connPool: pool.NewStickyConnPool(c.connPool),
+ },
+ hooks: c.hooks.clone(),
+ ctx: ctx,
+ }
+ tx.init()
+ return &tx
+}
+
+func (c *Tx) init() {
+ c.cmdable = c.Process
+ c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+ return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+ if ctx == nil {
+ panic("nil context")
+ }
+ clone := *c
+ clone.init()
+ clone.hooks.lock()
+ clone.ctx = ctx
+ return &clone
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+ return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and marks the keys to be watched
+// for conditional execution if there are any keys.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+ tx := c.newTx(ctx)
+ if len(keys) > 0 {
+ if err := tx.Watch(ctx, keys...).Err(); err != nil {
+ _ = tx.Close(ctx)
+ return err
+ }
+ }
+
+ err := fn(tx)
+ _ = tx.Close(ctx)
+ return err
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+ _ = c.Unwatch(ctx).Err()
+ return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "watch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unwatch"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c.Process(ctx, cmd)
+ return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails
+// TxFailedErr is returned. Otherwise Exec returns an error of the first
+// failed command or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ ctx: c.ctx,
+ exec: func(ctx context.Context, cmds []Cmder) error {
+ return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+ },
+ }
+ pipe.init()
+ return &pipe
+}
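
For reference only, a sketch of the optimistic check-and-set pattern that Watch and TxPipelined enable; the key and address are illustrative.

package main

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// incrementCAS reads a counter under WATCH and writes it back in a MULTI/EXEC
// block; if the watched key changed in between, Exec fails with TxFailedErr.
func incrementCAS(ctx context.Context, rdb *redis.Client, key string) error {
	return rdb.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, n+1, 0)
			return nil
		})
		return err
	}, key)
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"}) // placeholder address
	defer rdb.Close()
	_ = incrementCAS(context.Background(), rdb, "counter")
}
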
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
new file mode 100644
index 0000000..5f0e1e3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -0,0 +1,206 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+ // Either a single address or a seed list of host:port addresses
+ // of cluster/sentinel nodes.
+ Addrs []string
+
+ // Database to be selected after connecting to the server.
+ // Only single-node and failover clients.
+ DB int
+
+ // Common options.
+
+ Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+ OnConnect func(ctx context.Context, cn *Conn) error
+
+ Username string
+ Password string
+ SentinelPassword string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+
+ // Only cluster clients.
+
+ MaxRedirects int
+ ReadOnly bool
+ RouteByLatency bool
+ RouteRandomly bool
+
+ // The sentinel master name.
+ // Only failover clients.
+ MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:6379"}
+ }
+
+ return &ClusterOptions{
+ Addrs: o.Addrs,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRedirects: o.MaxRedirects,
+ ReadOnly: o.ReadOnly,
+ RouteByLatency: o.RouteByLatency,
+ RouteRandomly: o.RouteRandomly,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+ if len(o.Addrs) == 0 {
+ o.Addrs = []string{"127.0.0.1:26379"}
+ }
+
+ return &FailoverOptions{
+ SentinelAddrs: o.Addrs,
+ MasterName: o.MasterName,
+
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+ SentinelPassword: o.SentinelPassword,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+ addr := "127.0.0.1:6379"
+ if len(o.Addrs) > 0 {
+ addr = o.Addrs[0]
+ }
+
+ return &Options{
+ Addr: addr,
+ Dialer: o.Dialer,
+ OnConnect: o.OnConnect,
+
+ DB: o.DB,
+ Username: o.Username,
+ Password: o.Password,
+
+ MaxRetries: o.MaxRetries,
+ MinRetryBackoff: o.MinRetryBackoff,
+ MaxRetryBackoff: o.MaxRetryBackoff,
+
+ DialTimeout: o.DialTimeout,
+ ReadTimeout: o.ReadTimeout,
+ WriteTimeout: o.WriteTimeout,
+
+ PoolSize: o.PoolSize,
+ MinIdleConns: o.MinIdleConns,
+ MaxConnAge: o.MaxConnAge,
+ PoolTimeout: o.PoolTimeout,
+ IdleTimeout: o.IdleTimeout,
+ IdleCheckFrequency: o.IdleCheckFrequency,
+
+ TLSConfig: o.TLSConfig,
+ }
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which, based on the provided options,
+// connects to either a cluster, a sentinel-backed failover instance, or a
+// simple single-instance server. This can be useful for testing
+// cluster-specific applications locally.
+type UniversalClient interface {
+ Cmdable
+ Context() context.Context
+ AddHook(Hook)
+ Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+ Do(ctx context.Context, args ...interface{}) *Cmd
+ Process(ctx context.Context, cmd Cmder) error
+ Subscribe(ctx context.Context, channels ...string) *PubSub
+ PSubscribe(ctx context.Context, channels ...string) *PubSub
+ Close() error
+ PoolStats() *PoolStats
+}
+
+var (
+ _ UniversalClient = (*Client)(nil)
+ _ UniversalClient = (*ClusterClient)(nil)
+ _ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of client returned depends
+// on the following three conditions:
+//
+// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned
+// 2. if the number of Addrs is two or more, a ClusterClient will be returned
+// 3. otherwise, a single-node redis Client will be returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+ if opts.MasterName != "" {
+ return NewFailoverClient(opts.Failover())
+ } else if len(opts.Addrs) > 1 {
+ return NewClusterClient(opts.Cluster())
+ }
+ return NewClient(opts.Simple())
+}
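
For reference only, a sketch of the three selection rules listed above for NewUniversalClient; all addresses and the master name are placeholders.

package main

import "github.com/go-redis/redis/v8"

func main() {
	// One address, no MasterName: a plain single-node *redis.Client is returned.
	single := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"127.0.0.1:6379"},
	})
	defer single.Close()

	// Two or more addresses: a ClusterClient is returned.
	cluster := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"node-0:6379", "node-1:6379"},
	})
	defer cluster.Close()

	// MasterName set: a sentinel-backed failover client is returned.
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:      []string{"sentinel-0:26379"},
		MasterName: "mymaster",
	})
	defer failover.Close()
}
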
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
index 2e57a27..31f100b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
@@ -75,6 +75,10 @@
func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
switch b.StoreType {
+ case "redis":
+ return kvstore.NewRedisClient(address, timeout, false)
+ case "redis-sentinel":
+ return kvstore.NewRedisClient(address, timeout, true)
case "etcd":
return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
}
@@ -176,7 +180,7 @@
// List retrieves one or more items that match the specified key
func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
- span, ctx := log.CreateChildSpan(ctx, "etcd-list")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-list")
defer span.Finish()
formattedPath := b.makePath(ctx, key)
@@ -191,7 +195,7 @@
// Get retrieves an item that matches the specified key
func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
- span, ctx := log.CreateChildSpan(ctx, "etcd-get")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-get")
defer span.Finish()
formattedPath := b.makePath(ctx, key)
@@ -206,7 +210,7 @@
// Put stores an item value under the specified key
func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
- span, ctx := log.CreateChildSpan(ctx, "etcd-put")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-put")
defer span.Finish()
formattedPath := b.makePath(ctx, key)
@@ -221,7 +225,7 @@
// Delete removes an item under the specified key
func (b *Backend) Delete(ctx context.Context, key string) error {
- span, ctx := log.CreateChildSpan(ctx, "etcd-delete")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-delete")
defer span.Finish()
formattedPath := b.makePath(ctx, key)
@@ -234,9 +238,8 @@
return err
}
-// DeleteWithPrefix removes items having prefix key
func (b *Backend) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
- span, ctx := log.CreateChildSpan(ctx, "etcd-delete-with-prefix")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-delete-with-prefix")
defer span.Finish()
formattedPath := b.makePath(ctx, prefixKey)
@@ -251,7 +254,7 @@
// CreateWatch starts watching events for the specified key
func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
- span, ctx := log.CreateChildSpan(ctx, "etcd-create-watch")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-create-watch")
defer span.Finish()
formattedPath := b.makePath(ctx, key)
@@ -262,7 +265,7 @@
// DeleteWatch stops watching events for the specified key
func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
- span, ctx := log.CreateChildSpan(ctx, "etcd-delete-watch")
+ span, ctx := log.CreateChildSpan(ctx, "kvs-delete-watch")
defer span.Finish()
formattedPath := b.makePath(ctx, key)
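
For reference only, a sketch of how the new redis and redis-sentinel store types map onto the kvstore client added below; the addresses are placeholders, and the sentinel master set is hard-coded to sebaRedis by that client.

package main

import (
	"time"

	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
)

func main() {
	// StoreType "redis": a direct single-node client.
	if _, err := kvstore.NewRedisClient("redis.voltha.svc:6379", 5*time.Second, false); err != nil {
		panic(err)
	}
	// StoreType "redis-sentinel": the address points at a sentinel and the
	// client resolves the master of the "sebaRedis" master set.
	if _, err := kvstore.NewRedisClient("redis-sentinel.voltha.svc:26379", 5*time.Second, true); err != nil {
		panic(err)
	}
}
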
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
new file mode 100644
index 0000000..decb0a4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+type RedisClient struct {
+ redisAPI *redis.Client
+ keyReservations map[string]time.Duration
+ watchedChannels sync.Map
+ writeLock sync.Mutex
+ keyReservationsLock sync.RWMutex
+}
+
+func NewRedisClient(addr string, timeout time.Duration, useSentinel bool) (*RedisClient, error) {
+ var r *redis.Client
+ if !useSentinel {
+ r = redis.NewClient(&redis.Options{Addr: addr})
+ } else {
+		// Redis master-replica deployment with Sentinel; the Sentinel masterSet
+		// config should be set to sebaRedis
+ r = redis.NewFailoverClient(&redis.FailoverOptions{
+ MasterName: "sebaRedis",
+ SentinelAddrs: []string{addr},
+ })
+ }
+
+ reservations := make(map[string]time.Duration)
+ return &RedisClient{redisAPI: r, keyReservations: reservations}, nil
+}
+
+func (c *RedisClient) Get(ctx context.Context, key string) (*KVPair, error) {
+
+ val, err := c.redisAPI.Get(ctx, key).Result()
+ valBytes, _ := ToByte(val)
+ if err != nil {
+ return nil, nil
+ }
+ return NewKVPair(key, valBytes, "", 0, 0), nil
+}
+
+func (c *RedisClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert the value to a string, as the Redis API expects a string
+ var val string
+ var er error
+ if val, er = ToString(value); er != nil {
+ return fmt.Errorf("unexpected-type-%T", value)
+ }
+
+	// Store the value with no expiration; reservation TTLs are handled
+	// separately by Reserve and RenewReservation.
+ setErr := c.redisAPI.Set(ctx, key, val, 0)
+ err := setErr.Err()
+
+ if err != nil {
+ switch setErr.Err() {
+ case context.Canceled:
+ logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+ case context.DeadlineExceeded:
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
+ default:
+ logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
+ }
+ return err
+ }
+ return nil
+}
+
+func (c *RedisClient) scanAllKeysWithPrefix(ctx context.Context, key string) ([]string, error) {
+ var err error
+ allkeys := []string{}
+ cont := true
+ cursor := uint64(0)
+ matchPrefix := key + "*"
+
+ for cont {
+ // search in the first 10000 entries starting from the point indicated by the cursor
+ logger.Debugw(ctx, "redis-scan", log.Fields{"matchPrefix": matchPrefix, "cursor": cursor})
+ var keys []string
+ keys, cursor, err = c.redisAPI.Scan(context.Background(), cursor, matchPrefix, 10000).Result()
+ if err != nil {
+ return nil, err
+ }
+ if cursor == 0 {
+ // all data searched. break the loop
+ logger.Debugw(ctx, "redis-scan-ended", log.Fields{"matchPrefix": matchPrefix, "cursor": cursor})
+ cont = false
+ }
+ if len(keys) == 0 {
+ // no matched data found in this cycle. Continue to search
+ logger.Debugw(ctx, "redis-scan-no-data-found-continue", log.Fields{"matchPrefix": matchPrefix, "cursor": cursor})
+ continue
+ }
+ allkeys = append(allkeys, keys...)
+ }
+ return allkeys, nil
+}
+
+func (c *RedisClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+ var err error
+ var keys []string
+ m := make(map[string]*KVPair)
+ var values []interface{}
+
+ if keys, err = c.scanAllKeysWithPrefix(ctx, key); err != nil {
+ return nil, err
+ }
+
+ if len(keys) != 0 {
+ values, err = c.redisAPI.MGet(ctx, keys...).Result()
+ if err != nil {
+ return nil, err
+ }
+ }
+ for i, key := range keys {
+ if valBytes, err := ToByte(values[i]); err == nil {
+ m[key] = NewKVPair(key, interface{}(valBytes), "", 0, 0)
+ }
+ }
+ return m, nil
+}
+
+func (c *RedisClient) Delete(ctx context.Context, key string) error {
+ // delete the key
+ if _, err := c.redisAPI.Del(ctx, key).Result(); err != nil {
+ logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
+ return err
+ }
+ logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+ return nil
+}
+
+func (c *RedisClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+ var keys []string
+ var err error
+ if keys, err = c.scanAllKeysWithPrefix(ctx, prefixKey); err != nil {
+ return err
+ }
+ if len(keys) == 0 {
+ logger.Warn(ctx, "nothing-to-delete-from-kv", log.Fields{"key": prefixKey})
+ return nil
+ }
+ //call delete for keys
+ entryCount := int64(0)
+ start := 0
+ pageSize := 5000
+ length := len(keys)
+ for start < length {
+ end := start + pageSize
+ if end >= length {
+ end = length
+ }
+ keysToDelete := keys[start:end]
+ count := int64(0)
+ if count, err = c.redisAPI.Del(ctx, keysToDelete...).Result(); err != nil {
+ logger.Errorw(ctx, "DeleteWithPrefix method failed", log.Fields{"prefixKey": prefixKey, "numOfMatchedKeys": len(keysToDelete), "err": err})
+ return err
+ }
+ entryCount += count
+ start = end
+ }
+ logger.Debugf(ctx, "%d entries matching with the key prefix %s have been deleted successfully", entryCount, prefixKey)
+ return nil
+}
+
+func (c *RedisClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+ var val string
+ var er error
+ if val, er = ToString(value); er != nil {
+		return nil, fmt.Errorf("unexpected-type-%T", value)
+ }
+
+ // SetNX -- Only set the key if it does not already exist.
+ c.redisAPI.SetNX(ctx, key, value, ttl)
+
+	// Check whether the SetNX above actually stored the value
+	redisVal := c.redisAPI.Get(ctx, key).Val()
+	if redisVal == "" {
+		logger.Debugw(ctx, "reserve-key-not-set", log.Fields{"key": key})
+		return nil, nil
+	}
+
+ if val == redisVal {
+ // set is successful, return new reservation value
+ c.keyReservationsLock.Lock()
+ c.keyReservations[key] = ttl
+ c.keyReservationsLock.Unlock()
+ bytes, _ := ToByte(val)
+ return bytes, nil
+ } else {
+ // set is not successful, return existing reservation value
+ bytes, _ := ToByte(redisVal)
+ return bytes, nil
+ }
+
+}
+
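+// ReleaseReservation removes the TTL from a reserved key and drops it from the
+// locally tracked reservations so the key no longer expires.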
+func (c *RedisClient) ReleaseReservation(ctx context.Context, key string) error {
+
+ redisVal := c.redisAPI.Get(ctx, key).Val()
+ if redisVal == "" {
+ return nil
+ }
+
+	// Overwrite the SetNX value with no TTL so the key no longer expires
+	if _, err := c.redisAPI.Set(ctx, key, redisVal, 0).Result(); err != nil {
+		return err
+	}
+	delete(c.keyReservations, key)
+	return nil
+
+}
+
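+// ReleaseAllReservations releases every reservation currently tracked by this
+// client.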
+func (c *RedisClient) ReleaseAllReservations(ctx context.Context) error {
+ c.writeLock.Lock()
+ defer c.writeLock.Unlock()
+ for key := range c.keyReservations {
+ err := c.ReleaseReservation(ctx, key)
+ if err != nil {
+ logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
+ return err
+ }
+ }
+ return nil
+}
+
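+// RenewReservation re-applies the originally reserved TTL to the key, provided
+// the key was reserved through this client.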
+func (c *RedisClient) RenewReservation(ctx context.Context, key string) error {
+ c.writeLock.Lock()
+ defer c.writeLock.Unlock()
+
+ // Verify the key was reserved
+ ttl, ok := c.keyReservations[key]
+ if !ok {
+		return errors.New("key-not-reserved")
+ }
+
+ redisVal := c.redisAPI.Get(ctx, key).Val()
+ if redisVal != "" {
+ c.redisAPI.Set(ctx, key, redisVal, ttl)
+ }
+ return nil
+}
+
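+// listenForKeyChange translates Redis keyspace notifications received on
+// redisCh into KV store Events and forwards them on ch until redisCh closes.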
+func (c *RedisClient) listenForKeyChange(ctx context.Context, redisCh <-chan *redis.Message, ch chan<- *Event, cancel context.CancelFunc) {
+ logger.Debug(ctx, "start-listening-on-channel ...")
+ defer cancel()
+ defer close(ch)
+ for msg := range redisCh {
+		// Channel names look like "__keyspace@<db>__:<key>"; keep everything after
+		// the first colon so keys that themselves contain colons are not truncated.
+		words := strings.SplitN(msg.Channel, ":", 2)
+		if len(words) < 2 {
+			continue
+		}
+		key := words[1]
+ msgType := getMessageType(msg.Payload)
+ var valBytes []byte
+ if msgType == PUT {
+ ev, _ := c.Get(ctx, key)
+ valBytes, _ = ToByte(ev.Value)
+ }
+ ch <- NewEvent(getMessageType(msg.Payload), []byte(key), valBytes, 0)
+ }
+ logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
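+// getMessageType maps a keyspace notification payload ("set"/"del") to the
+// corresponding KV event type.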
+func getMessageType(msg string) int {
+ isPut := strings.HasSuffix(msg, "set")
+ isDel := strings.HasSuffix(msg, "del")
+ if isPut {
+ return PUT
+ } else if isDel {
+ return DELETE
+ } else {
+ return UNKNOWN
+ }
+}
+
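+// addChannelMap records an event-channel/subscription pair for the key and
+// returns the updated list of watchers for that key.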
+func (c *RedisClient) addChannelMap(key string, channelMap map[chan *Event]*redis.PubSub) []map[chan *Event]*redis.PubSub {
+
+ var channels interface{}
+ var exists bool
+
+ if channels, exists = c.watchedChannels.Load(key); exists {
+ channels = append(channels.([]map[chan *Event]*redis.PubSub), channelMap)
+ } else {
+ channels = []map[chan *Event]*redis.PubSub{channelMap}
+ }
+ c.watchedChannels.Store(key, channels)
+
+ return channels.([]map[chan *Event]*redis.PubSub)
+}
+
+func (c *RedisClient) removeChannelMap(key string, pos int) []map[chan *Event]*redis.PubSub {
+ var channels interface{}
+ var exists bool
+
+ if channels, exists = c.watchedChannels.Load(key); exists {
+ channels = append(channels.([]map[chan *Event]*redis.PubSub)[:pos], channels.([]map[chan *Event]*redis.PubSub)[pos+1:]...)
+ c.watchedChannels.Store(key, channels)
+ }
+
+ return channels.([]map[chan *Event]*redis.PubSub)
+}
+
+func (c *RedisClient) getChannelMaps(key string) ([]map[chan *Event]*redis.PubSub, bool) {
+ var channels interface{}
+ var exists bool
+
+ channels, exists = c.watchedChannels.Load(key)
+
+ if channels == nil {
+ return nil, exists
+ }
+
+ return channels.([]map[chan *Event]*redis.PubSub), exists
+}
+
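+// Watch subscribes to Redis keyspace notifications for the given key
+// (optionally treated as a prefix) and returns a buffered channel on which
+// PUT/DELETE events are delivered. A minimal usage sketch, where client is an
+// existing *RedisClient and the key is only illustrative:
+//
+//	events := client.Watch(ctx, "service/voltha/devices", true)
+//	for ev := range events {
+//		// inspect the event type, key and value carried by ev
+//	}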
+func (c *RedisClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+
+ ctx, cancel := context.WithCancel(ctx)
+
+	subscribePath := "__key*__:" + key
+	if withPrefix {
+		subscribePath += "*"
+	}
+ pubsub := c.redisAPI.PSubscribe(ctx, subscribePath)
+ redisCh := pubsub.Channel()
+
+ // Create new channel
+ ch := make(chan *Event, maxClientChannelBufferSize)
+
+ // Keep track of the created channels so they can be closed when required
+ channelMap := make(map[chan *Event]*redis.PubSub)
+ channelMap[ch] = pubsub
+
+ channelMaps := c.addChannelMap(key, channelMap)
+ logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+
+ // Launch a go routine to listen for updates
+ go c.listenForKeyChange(ctx, redisCh, ch, cancel)
+ return ch
+}
+
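+// CloseWatch closes the Redis subscription backing the given event channel and
+// removes it from the set of channels watched for the key.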
+func (c *RedisClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+ // Get the array of channels mapping
+ var watchedChannels []map[chan *Event]*redis.PubSub
+ var ok bool
+
+ if watchedChannels, ok = c.getChannelMaps(key); !ok {
+ logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+ return
+ }
+ // Look for the channels
+ var pos = -1
+ for i, chMap := range watchedChannels {
+ if t, ok := chMap[ch]; ok {
+ logger.Debug(ctx, "channel-found")
+			// Close the Redis subscription before the client channel; this also closes
+			// the message channel returned by PSubscribe
+ if err := t.Close(); err != nil {
+ logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+ }
+ pos = i
+ break
+ }
+ }
+
+ channelMaps, _ := c.getChannelMaps(key)
+ // Remove that entry if present
+ if pos >= 0 {
+ channelMaps = c.removeChannelMap(key, pos)
+ }
+ logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
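+
+// AcquireLock and ReleaseLock are no-ops for the Redis client; distributed
+// locking is not implemented for this backend.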
+func (c *RedisClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+ return nil
+}
+
+func (c *RedisClient) ReleaseLock(lockName string) error {
+ return nil
+}
+
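+// IsConnectionUp reports whether the Redis server is reachable by issuing a
+// trivial write.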
+func (c *RedisClient) IsConnectionUp(ctx context.Context) bool {
+ if _, err := c.redisAPI.Set(ctx, "connection-check", "1", 0).Result(); err != nil {
+ return false
+ }
+ return true
+
+}
+
+func (c *RedisClient) Close(ctx context.Context) {
+ if err := c.redisAPI.Close(); err != nil {
+ logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
+ }
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go
index 0f0468e..25d9683 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2022 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
index 294983f..3baa1f4 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-present Open Networking Foundation
+ * Copyright 2021-2022 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go
index c0d169a..f319d66 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/common.go
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-present Open Networking Foundation
+ * Copyright 2020-2022 Open Networking Foundation (ONF) and the ONF Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
index 7b1a123..6810fe1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
@@ -660,3 +660,39 @@
func (l clogger) GetLogLevel() LogLevel {
return levelToLogLevel(cfgs[l.packageName].Level.Level())
}
+
+// UpdateCallerSkipLevel rebuilds the logger of the calling registered package with the updated caller-skip level applied.
+// This makes it possible to skip wrapper-file frames in the caller info and stack trace.
+func UpdateCallerSkipLevel(skipLevel int) (CLogger, error) {
+ pkgName, _, _, _ := getCallerInfo()
+ if cfg, exist := cfgs[pkgName]; exist {
+ l, err := cfg.Build(zp.AddCallerSkip(skipLevel))
+ if err != nil {
+ return loggers[pkgName], err
+ }
+
+ // Update the existing zap logger instance
+ loggers[pkgName].log = l.Sugar()
+ loggers[pkgName].parent = l
+
+ return loggers[pkgName], nil
+ }
+
+ return loggers[pkgName], errors.New("Package Not Found")
+}
+
+// UpdateAllCallerSkipLevel rebuilds the loggers of all registered packages with the updated caller-skip level applied.
+// This makes it possible to skip wrapper-file frames in the caller info and stack trace.
+func UpdateAllCallerSkipLevel(skipLevel int) error {
+ for pkgName, cfg := range cfgs {
+ l, err := cfg.Build(zp.AddCallerSkip(skipLevel))
+ if err != nil {
+ return err
+ }
+
+ // Update the existing zap logger instance
+ loggers[pkgName].log = l.Sugar()
+ loggers[pkgName].parent = l
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
index e6105b6..a6c7a43 100644
--- a/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
@@ -228,7 +228,7 @@
}
func (GetOmciEthernetFrameExtendedPmResponse_Format) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{21, 0}
+ return fileDescriptor_7ecf6e9799a9202d, []int{22, 0}
}
type GetValueResponse_Status int32
@@ -256,7 +256,7 @@
}
func (GetValueResponse_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{24, 0}
+ return fileDescriptor_7ecf6e9799a9202d, []int{29, 0}
}
type GetValueResponse_ErrorReason int32
@@ -299,7 +299,7 @@
}
func (GetValueResponse_ErrorReason) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{24, 1}
+ return fileDescriptor_7ecf6e9799a9202d, []int{29, 1}
}
type SetValueResponse_Status int32
@@ -327,7 +327,7 @@
}
func (SetValueResponse_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{26, 0}
+ return fileDescriptor_7ecf6e9799a9202d, []int{31, 0}
}
type SetValueResponse_ErrorReason int32
@@ -352,7 +352,7 @@
}
func (SetValueResponse_ErrorReason) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{26, 1}
+ return fileDescriptor_7ecf6e9799a9202d, []int{31, 1}
}
type ValueSet struct {
@@ -1450,6 +1450,7 @@
}
}
+// DEPRECATED
type GetRxPowerRequest struct {
IntfId uint32 `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
OnuId uint32 `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
@@ -1497,6 +1498,57 @@
return 0
}
+type GetOltRxPowerRequest struct {
+ PortLabel string `protobuf:"bytes,1,opt,name=port_label,json=portLabel,proto3" json:"port_label,omitempty"`
+ // onu_sn is optional and if onu_sn is an empty string and the label is
+ // of a PON port then it means that the Rx Power corresponding to all
+ // the ONUs on that PON port is requested. In case the port_label is not
+ // of a PON port, the onu_sn does not have any significance
+ OnuSn string `protobuf:"bytes,2,opt,name=onu_sn,json=onuSn,proto3" json:"onu_sn,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetOltRxPowerRequest) Reset() { *m = GetOltRxPowerRequest{} }
+func (m *GetOltRxPowerRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOltRxPowerRequest) ProtoMessage() {}
+func (*GetOltRxPowerRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ecf6e9799a9202d, []int{19}
+}
+
+func (m *GetOltRxPowerRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetOltRxPowerRequest.Unmarshal(m, b)
+}
+func (m *GetOltRxPowerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetOltRxPowerRequest.Marshal(b, m, deterministic)
+}
+func (m *GetOltRxPowerRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetOltRxPowerRequest.Merge(m, src)
+}
+func (m *GetOltRxPowerRequest) XXX_Size() int {
+ return xxx_messageInfo_GetOltRxPowerRequest.Size(m)
+}
+func (m *GetOltRxPowerRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetOltRxPowerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOltRxPowerRequest proto.InternalMessageInfo
+
+func (m *GetOltRxPowerRequest) GetPortLabel() string {
+ if m != nil {
+ return m.PortLabel
+ }
+ return ""
+}
+
+func (m *GetOltRxPowerRequest) GetOnuSn() string {
+ if m != nil {
+ return m.OnuSn
+ }
+ return ""
+}
+
type GetOnuCountersResponse struct {
// Types that are valid to be assigned to IsIntfId:
// *GetOnuCountersResponse_IntfId
@@ -1588,7 +1640,7 @@
func (m *GetOnuCountersResponse) String() string { return proto.CompactTextString(m) }
func (*GetOnuCountersResponse) ProtoMessage() {}
func (*GetOnuCountersResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{19}
+ return fileDescriptor_7ecf6e9799a9202d, []int{20}
}
func (m *GetOnuCountersResponse) XXX_Unmarshal(b []byte) error {
@@ -2318,7 +2370,7 @@
func (m *OmciEthernetFrameExtendedPm) String() string { return proto.CompactTextString(m) }
func (*OmciEthernetFrameExtendedPm) ProtoMessage() {}
func (*OmciEthernetFrameExtendedPm) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{20}
+ return fileDescriptor_7ecf6e9799a9202d, []int{21}
}
func (m *OmciEthernetFrameExtendedPm) XXX_Unmarshal(b []byte) error {
@@ -2452,7 +2504,7 @@
func (m *GetOmciEthernetFrameExtendedPmResponse) String() string { return proto.CompactTextString(m) }
func (*GetOmciEthernetFrameExtendedPmResponse) ProtoMessage() {}
func (*GetOmciEthernetFrameExtendedPmResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{21}
+ return fileDescriptor_7ecf6e9799a9202d, []int{22}
}
func (m *GetOmciEthernetFrameExtendedPmResponse) XXX_Unmarshal(b []byte) error {
@@ -2494,6 +2546,117 @@
return GetOmciEthernetFrameExtendedPmResponse_THIRTY_TWO_BIT
}
+type RxPower struct {
+ OnuSn string `protobuf:"bytes,1,opt,name=onu_sn,json=onuSn,proto3" json:"onu_sn,omitempty"`
+ Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+ FailReason string `protobuf:"bytes,3,opt,name=fail_reason,json=failReason,proto3" json:"fail_reason,omitempty"`
+ RxPower float64 `protobuf:"fixed64,4,opt,name=rx_power,json=rxPower,proto3" json:"rx_power,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RxPower) Reset() { *m = RxPower{} }
+func (m *RxPower) String() string { return proto.CompactTextString(m) }
+func (*RxPower) ProtoMessage() {}
+func (*RxPower) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ecf6e9799a9202d, []int{23}
+}
+
+func (m *RxPower) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RxPower.Unmarshal(m, b)
+}
+func (m *RxPower) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RxPower.Marshal(b, m, deterministic)
+}
+func (m *RxPower) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RxPower.Merge(m, src)
+}
+func (m *RxPower) XXX_Size() int {
+ return xxx_messageInfo_RxPower.Size(m)
+}
+func (m *RxPower) XXX_DiscardUnknown() {
+ xxx_messageInfo_RxPower.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RxPower proto.InternalMessageInfo
+
+func (m *RxPower) GetOnuSn() string {
+ if m != nil {
+ return m.OnuSn
+ }
+ return ""
+}
+
+func (m *RxPower) GetStatus() string {
+ if m != nil {
+ return m.Status
+ }
+ return ""
+}
+
+func (m *RxPower) GetFailReason() string {
+ if m != nil {
+ return m.FailReason
+ }
+ return ""
+}
+
+func (m *RxPower) GetRxPower() float64 {
+ if m != nil {
+ return m.RxPower
+ }
+ return 0
+}
+
+type GetOltRxPowerResponse struct {
+ PortLabel string `protobuf:"bytes,1,opt,name=port_label,json=portLabel,proto3" json:"port_label,omitempty"`
+ RxPower []*RxPower `protobuf:"bytes,2,rep,name=rx_power,json=rxPower,proto3" json:"rx_power,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetOltRxPowerResponse) Reset() { *m = GetOltRxPowerResponse{} }
+func (m *GetOltRxPowerResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOltRxPowerResponse) ProtoMessage() {}
+func (*GetOltRxPowerResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ecf6e9799a9202d, []int{24}
+}
+
+func (m *GetOltRxPowerResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetOltRxPowerResponse.Unmarshal(m, b)
+}
+func (m *GetOltRxPowerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetOltRxPowerResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOltRxPowerResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetOltRxPowerResponse.Merge(m, src)
+}
+func (m *GetOltRxPowerResponse) XXX_Size() int {
+ return xxx_messageInfo_GetOltRxPowerResponse.Size(m)
+}
+func (m *GetOltRxPowerResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetOltRxPowerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOltRxPowerResponse proto.InternalMessageInfo
+
+func (m *GetOltRxPowerResponse) GetPortLabel() string {
+ if m != nil {
+ return m.PortLabel
+ }
+ return ""
+}
+
+func (m *GetOltRxPowerResponse) GetRxPower() []*RxPower {
+ if m != nil {
+ return m.RxPower
+ }
+ return nil
+}
+
+// DEPRECATED
type GetRxPowerResponse struct {
IntfId uint32 `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
OnuId uint32 `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
@@ -2509,7 +2672,7 @@
func (m *GetRxPowerResponse) String() string { return proto.CompactTextString(m) }
func (*GetRxPowerResponse) ProtoMessage() {}
func (*GetRxPowerResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{22}
+ return fileDescriptor_7ecf6e9799a9202d, []int{25}
}
func (m *GetRxPowerResponse) XXX_Unmarshal(b []byte) error {
@@ -2565,6 +2728,167 @@
return 0
}
+type GetOnuOmciTxRxStatsRequest struct {
+ Empty *empty.Empty `protobuf:"bytes,1,opt,name=empty,proto3" json:"empty,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetOnuOmciTxRxStatsRequest) Reset() { *m = GetOnuOmciTxRxStatsRequest{} }
+func (m *GetOnuOmciTxRxStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOnuOmciTxRxStatsRequest) ProtoMessage() {}
+func (*GetOnuOmciTxRxStatsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ecf6e9799a9202d, []int{26}
+}
+
+func (m *GetOnuOmciTxRxStatsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetOnuOmciTxRxStatsRequest.Unmarshal(m, b)
+}
+func (m *GetOnuOmciTxRxStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetOnuOmciTxRxStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *GetOnuOmciTxRxStatsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetOnuOmciTxRxStatsRequest.Merge(m, src)
+}
+func (m *GetOnuOmciTxRxStatsRequest) XXX_Size() int {
+ return xxx_messageInfo_GetOnuOmciTxRxStatsRequest.Size(m)
+}
+func (m *GetOnuOmciTxRxStatsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetOnuOmciTxRxStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuOmciTxRxStatsRequest proto.InternalMessageInfo
+
+func (m *GetOnuOmciTxRxStatsRequest) GetEmpty() *empty.Empty {
+ if m != nil {
+ return m.Empty
+ }
+ return nil
+}
+
+// see ITU-T G.988 clause 11.2.2
+type GetOnuOmciTxRxStatsResponse struct {
+ // OMCI baseline Tx frames with AR bit set
+ BaseTxArFrames uint32 `protobuf:"varint,1,opt,name=base_tx_ar_frames,json=baseTxArFrames,proto3" json:"base_tx_ar_frames,omitempty"`
+ // OMCI baseline Rx frames with AK bit set
+ BaseRxAkFrames uint32 `protobuf:"varint,2,opt,name=base_rx_ak_frames,json=baseRxAkFrames,proto3" json:"base_rx_ak_frames,omitempty"`
+ // OMCI baseline Tx frames with AR bit unset
+ BaseTxNoArFrames uint32 `protobuf:"varint,3,opt,name=base_tx_no_ar_frames,json=baseTxNoArFrames,proto3" json:"base_tx_no_ar_frames,omitempty"`
+ // OMCI baseline Rx frames with AK bit unset
+ BaseRxNoAkFrames uint32 `protobuf:"varint,4,opt,name=base_rx_no_ak_frames,json=baseRxNoAkFrames,proto3" json:"base_rx_no_ak_frames,omitempty"`
+ // OMCI extended Tx frames with AR bit set
+ ExtTxArFrames uint32 `protobuf:"varint,5,opt,name=ext_tx_ar_frames,json=extTxArFrames,proto3" json:"ext_tx_ar_frames,omitempty"`
+ // OMCI extended Rx frames with AK bit set
+ ExtRxAkFrames uint32 `protobuf:"varint,6,opt,name=ext_rx_ak_frames,json=extRxAkFrames,proto3" json:"ext_rx_ak_frames,omitempty"`
+ // OMCI extended Tx frames with AR bit unset
+ ExtTxNoArFrames uint32 `protobuf:"varint,7,opt,name=ext_tx_no_ar_frames,json=extTxNoArFrames,proto3" json:"ext_tx_no_ar_frames,omitempty"`
+ // OMCI extended Rx frames with AK bit unset
+ ExtRxNoAkFrames uint32 `protobuf:"varint,8,opt,name=ext_rx_no_ak_frames,json=extRxNoAkFrames,proto3" json:"ext_rx_no_ak_frames,omitempty"`
+ // Number of retries of requests (tx) due to not received responses (Rx)
+ TxOmciCounterRetries uint32 `protobuf:"varint,9,opt,name=tx_omci_counter_retries,json=txOmciCounterRetries,proto3" json:"tx_omci_counter_retries,omitempty"`
+ // Number of timeouts of requests (tx) due to not received responses (Rx) after configured number of retries
+ TxOmciCounterTimeouts uint32 `protobuf:"varint,10,opt,name=tx_omci_counter_timeouts,json=txOmciCounterTimeouts,proto3" json:"tx_omci_counter_timeouts,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) Reset() { *m = GetOnuOmciTxRxStatsResponse{} }
+func (m *GetOnuOmciTxRxStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOnuOmciTxRxStatsResponse) ProtoMessage() {}
+func (*GetOnuOmciTxRxStatsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7ecf6e9799a9202d, []int{27}
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetOnuOmciTxRxStatsResponse.Unmarshal(m, b)
+}
+func (m *GetOnuOmciTxRxStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetOnuOmciTxRxStatsResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOnuOmciTxRxStatsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetOnuOmciTxRxStatsResponse.Merge(m, src)
+}
+func (m *GetOnuOmciTxRxStatsResponse) XXX_Size() int {
+ return xxx_messageInfo_GetOnuOmciTxRxStatsResponse.Size(m)
+}
+func (m *GetOnuOmciTxRxStatsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetOnuOmciTxRxStatsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuOmciTxRxStatsResponse proto.InternalMessageInfo
+
+func (m *GetOnuOmciTxRxStatsResponse) GetBaseTxArFrames() uint32 {
+ if m != nil {
+ return m.BaseTxArFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetBaseRxAkFrames() uint32 {
+ if m != nil {
+ return m.BaseRxAkFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetBaseTxNoArFrames() uint32 {
+ if m != nil {
+ return m.BaseTxNoArFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetBaseRxNoAkFrames() uint32 {
+ if m != nil {
+ return m.BaseRxNoAkFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetExtTxArFrames() uint32 {
+ if m != nil {
+ return m.ExtTxArFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetExtRxAkFrames() uint32 {
+ if m != nil {
+ return m.ExtRxAkFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetExtTxNoArFrames() uint32 {
+ if m != nil {
+ return m.ExtTxNoArFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetExtRxNoAkFrames() uint32 {
+ if m != nil {
+ return m.ExtRxNoAkFrames
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetTxOmciCounterRetries() uint32 {
+ if m != nil {
+ return m.TxOmciCounterRetries
+ }
+ return 0
+}
+
+func (m *GetOnuOmciTxRxStatsResponse) GetTxOmciCounterTimeouts() uint32 {
+ if m != nil {
+ return m.TxOmciCounterTimeouts
+ }
+ return 0
+}
+
type GetValueRequest struct {
// Types that are valid to be assigned to Request:
// *GetValueRequest_Distance
@@ -2576,6 +2900,8 @@
// *GetValueRequest_OnuPonInfo
// *GetValueRequest_OnuInfo
// *GetValueRequest_RxPower
+ // *GetValueRequest_OnuOmciStats
+ // *GetValueRequest_OltRxPower
Request isGetValueRequest_Request `protobuf_oneof:"request"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -2586,7 +2912,7 @@
func (m *GetValueRequest) String() string { return proto.CompactTextString(m) }
func (*GetValueRequest) ProtoMessage() {}
func (*GetValueRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{23}
+ return fileDescriptor_7ecf6e9799a9202d, []int{28}
}
func (m *GetValueRequest) XXX_Unmarshal(b []byte) error {
@@ -2647,6 +2973,14 @@
RxPower *GetRxPowerRequest `protobuf:"bytes,9,opt,name=rxPower,proto3,oneof"`
}
+type GetValueRequest_OnuOmciStats struct {
+ OnuOmciStats *GetOnuOmciTxRxStatsRequest `protobuf:"bytes,10,opt,name=onuOmciStats,proto3,oneof"`
+}
+
+type GetValueRequest_OltRxPower struct {
+ OltRxPower *GetOltRxPowerRequest `protobuf:"bytes,11,opt,name=oltRxPower,proto3,oneof"`
+}
+
func (*GetValueRequest_Distance) isGetValueRequest_Request() {}
func (*GetValueRequest_UniInfo) isGetValueRequest_Request() {}
@@ -2665,6 +2999,10 @@
func (*GetValueRequest_RxPower) isGetValueRequest_Request() {}
+func (*GetValueRequest_OnuOmciStats) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_OltRxPower) isGetValueRequest_Request() {}
+
func (m *GetValueRequest) GetRequest() isGetValueRequest_Request {
if m != nil {
return m.Request
@@ -2735,6 +3073,20 @@
return nil
}
+func (m *GetValueRequest) GetOnuOmciStats() *GetOnuOmciTxRxStatsRequest {
+ if x, ok := m.GetRequest().(*GetValueRequest_OnuOmciStats); ok {
+ return x.OnuOmciStats
+ }
+ return nil
+}
+
+func (m *GetValueRequest) GetOltRxPower() *GetOltRxPowerRequest {
+ if x, ok := m.GetRequest().(*GetValueRequest_OltRxPower); ok {
+ return x.OltRxPower
+ }
+ return nil
+}
+
// XXX_OneofWrappers is for the internal use of the proto package.
func (*GetValueRequest) XXX_OneofWrappers() []interface{} {
return []interface{}{
@@ -2747,6 +3099,8 @@
(*GetValueRequest_OnuPonInfo)(nil),
(*GetValueRequest_OnuInfo)(nil),
(*GetValueRequest_RxPower)(nil),
+ (*GetValueRequest_OnuOmciStats)(nil),
+ (*GetValueRequest_OltRxPower)(nil),
}
}
@@ -2763,6 +3117,8 @@
// *GetValueResponse_OnuPonCounters
// *GetValueResponse_OnuCounters
// *GetValueResponse_RxPower
+ // *GetValueResponse_OnuOmciStats
+ // *GetValueResponse_OltRxPower
Response isGetValueResponse_Response `protobuf_oneof:"response"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -2773,7 +3129,7 @@
func (m *GetValueResponse) String() string { return proto.CompactTextString(m) }
func (*GetValueResponse) ProtoMessage() {}
func (*GetValueResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{24}
+ return fileDescriptor_7ecf6e9799a9202d, []int{29}
}
func (m *GetValueResponse) XXX_Unmarshal(b []byte) error {
@@ -2848,6 +3204,14 @@
RxPower *GetRxPowerResponse `protobuf:"bytes,11,opt,name=rxPower,proto3,oneof"`
}
+type GetValueResponse_OnuOmciStats struct {
+ OnuOmciStats *GetOnuOmciTxRxStatsResponse `protobuf:"bytes,12,opt,name=onuOmciStats,proto3,oneof"`
+}
+
+type GetValueResponse_OltRxPower struct {
+ OltRxPower *GetOltRxPowerResponse `protobuf:"bytes,13,opt,name=oltRxPower,proto3,oneof"`
+}
+
func (*GetValueResponse_Distance) isGetValueResponse_Response() {}
func (*GetValueResponse_UniInfo) isGetValueResponse_Response() {}
@@ -2866,6 +3230,10 @@
func (*GetValueResponse_RxPower) isGetValueResponse_Response() {}
+func (*GetValueResponse_OnuOmciStats) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_OltRxPower) isGetValueResponse_Response() {}
+
func (m *GetValueResponse) GetResponse() isGetValueResponse_Response {
if m != nil {
return m.Response
@@ -2936,6 +3304,20 @@
return nil
}
+func (m *GetValueResponse) GetOnuOmciStats() *GetOnuOmciTxRxStatsResponse {
+ if x, ok := m.GetResponse().(*GetValueResponse_OnuOmciStats); ok {
+ return x.OnuOmciStats
+ }
+ return nil
+}
+
+func (m *GetValueResponse) GetOltRxPower() *GetOltRxPowerResponse {
+ if x, ok := m.GetResponse().(*GetValueResponse_OltRxPower); ok {
+ return x.OltRxPower
+ }
+ return nil
+}
+
// XXX_OneofWrappers is for the internal use of the proto package.
func (*GetValueResponse) XXX_OneofWrappers() []interface{} {
return []interface{}{
@@ -2948,6 +3330,8 @@
(*GetValueResponse_OnuPonCounters)(nil),
(*GetValueResponse_OnuCounters)(nil),
(*GetValueResponse_RxPower)(nil),
+ (*GetValueResponse_OnuOmciStats)(nil),
+ (*GetValueResponse_OltRxPower)(nil),
}
}
@@ -2964,7 +3348,7 @@
func (m *SetValueRequest) String() string { return proto.CompactTextString(m) }
func (*SetValueRequest) ProtoMessage() {}
func (*SetValueRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{25}
+ return fileDescriptor_7ecf6e9799a9202d, []int{30}
}
func (m *SetValueRequest) XXX_Unmarshal(b []byte) error {
@@ -3028,7 +3412,7 @@
func (m *SetValueResponse) String() string { return proto.CompactTextString(m) }
func (*SetValueResponse) ProtoMessage() {}
func (*SetValueResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{26}
+ return fileDescriptor_7ecf6e9799a9202d, []int{31}
}
func (m *SetValueResponse) XXX_Unmarshal(b []byte) error {
@@ -3075,7 +3459,7 @@
func (m *SingleGetValueRequest) String() string { return proto.CompactTextString(m) }
func (*SingleGetValueRequest) ProtoMessage() {}
func (*SingleGetValueRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{27}
+ return fileDescriptor_7ecf6e9799a9202d, []int{32}
}
func (m *SingleGetValueRequest) XXX_Unmarshal(b []byte) error {
@@ -3121,7 +3505,7 @@
func (m *SingleGetValueResponse) String() string { return proto.CompactTextString(m) }
func (*SingleGetValueResponse) ProtoMessage() {}
func (*SingleGetValueResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{28}
+ return fileDescriptor_7ecf6e9799a9202d, []int{33}
}
func (m *SingleGetValueResponse) XXX_Unmarshal(b []byte) error {
@@ -3161,7 +3545,7 @@
func (m *SingleSetValueRequest) String() string { return proto.CompactTextString(m) }
func (*SingleSetValueRequest) ProtoMessage() {}
func (*SingleSetValueRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{29}
+ return fileDescriptor_7ecf6e9799a9202d, []int{34}
}
func (m *SingleSetValueRequest) XXX_Unmarshal(b []byte) error {
@@ -3207,7 +3591,7 @@
func (m *SingleSetValueResponse) String() string { return proto.CompactTextString(m) }
func (*SingleSetValueResponse) ProtoMessage() {}
func (*SingleSetValueResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7ecf6e9799a9202d, []int{30}
+ return fileDescriptor_7ecf6e9799a9202d, []int{35}
}
func (m *SingleSetValueResponse) XXX_Unmarshal(b []byte) error {
@@ -3266,10 +3650,15 @@
proto.RegisterType((*GetOnuCountersRequest)(nil), "extension.GetOnuCountersRequest")
proto.RegisterType((*GetOmciEthernetFrameExtendedPmRequest)(nil), "extension.GetOmciEthernetFrameExtendedPmRequest")
proto.RegisterType((*GetRxPowerRequest)(nil), "extension.GetRxPowerRequest")
+ proto.RegisterType((*GetOltRxPowerRequest)(nil), "extension.GetOltRxPowerRequest")
proto.RegisterType((*GetOnuCountersResponse)(nil), "extension.GetOnuCountersResponse")
proto.RegisterType((*OmciEthernetFrameExtendedPm)(nil), "extension.OmciEthernetFrameExtendedPm")
proto.RegisterType((*GetOmciEthernetFrameExtendedPmResponse)(nil), "extension.GetOmciEthernetFrameExtendedPmResponse")
+ proto.RegisterType((*RxPower)(nil), "extension.RxPower")
+ proto.RegisterType((*GetOltRxPowerResponse)(nil), "extension.GetOltRxPowerResponse")
proto.RegisterType((*GetRxPowerResponse)(nil), "extension.GetRxPowerResponse")
+ proto.RegisterType((*GetOnuOmciTxRxStatsRequest)(nil), "extension.GetOnuOmciTxRxStatsRequest")
+ proto.RegisterType((*GetOnuOmciTxRxStatsResponse)(nil), "extension.GetOnuOmciTxRxStatsResponse")
proto.RegisterType((*GetValueRequest)(nil), "extension.GetValueRequest")
proto.RegisterType((*GetValueResponse)(nil), "extension.GetValueResponse")
proto.RegisterType((*SetValueRequest)(nil), "extension.SetValueRequest")
@@ -3283,209 +3672,230 @@
func init() { proto.RegisterFile("voltha_protos/extensions.proto", fileDescriptor_7ecf6e9799a9202d) }
var fileDescriptor_7ecf6e9799a9202d = []byte{
- // 3230 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x6f, 0x1b, 0xc9,
- 0xb1, 0x17, 0x29, 0x89, 0x22, 0x8b, 0x22, 0x45, 0xb5, 0xbe, 0x28, 0xc9, 0x9f, 0xb3, 0x58, 0xdb,
- 0x6f, 0xe1, 0xa5, 0x4d, 0xae, 0xe4, 0xd5, 0xf3, 0xee, 0x03, 0x56, 0x23, 0x8e, 0x44, 0xc2, 0x32,
- 0x49, 0x37, 0x49, 0x7b, 0xfd, 0x80, 0x87, 0xc1, 0x88, 0xd3, 0x92, 0x07, 0x4b, 0xce, 0xf0, 0xcd,
- 0x34, 0xb5, 0x72, 0xae, 0xc9, 0x2d, 0xc9, 0x29, 0x97, 0xfc, 0x0d, 0x01, 0x82, 0x1c, 0x72, 0xc8,
- 0x3d, 0xe7, 0xfc, 0x13, 0x01, 0xf2, 0x17, 0x04, 0x39, 0x07, 0x41, 0x7f, 0xcc, 0x27, 0x29, 0xd9,
- 0xde, 0xe4, 0x62, 0xb3, 0xab, 0x7e, 0xf5, 0xeb, 0x9e, 0xea, 0xaa, 0xea, 0x9a, 0x1e, 0xc1, 0x9d,
- 0x4b, 0x67, 0x48, 0xdf, 0x19, 0xfa, 0xd8, 0x75, 0xa8, 0xe3, 0x3d, 0x21, 0x57, 0x94, 0xd8, 0x9e,
- 0xe5, 0xd8, 0x5e, 0x85, 0x4b, 0x50, 0x2e, 0x90, 0xec, 0x4c, 0x43, 0xf5, 0x81, 0x63, 0x9f, 0x5b,
- 0x17, 0x02, 0xba, 0xb3, 0x7b, 0xe1, 0x38, 0x17, 0x43, 0xf2, 0x84, 0x8f, 0xce, 0x26, 0xe7, 0x4f,
- 0xc8, 0x68, 0x4c, 0xdf, 0x0b, 0xa5, 0xf2, 0x7f, 0x90, 0x7d, 0x6d, 0x0c, 0x27, 0xa4, 0x4b, 0x28,
- 0x2a, 0x42, 0xda, 0x32, 0xcb, 0xa9, 0x7b, 0xa9, 0x47, 0x39, 0x9c, 0xb6, 0x4c, 0x74, 0x00, 0xcb,
- 0xc6, 0xd0, 0x70, 0x47, 0x92, 0xae, 0x9c, 0xbe, 0x97, 0x7a, 0x94, 0xaf, 0xad, 0x55, 0x24, 0xfb,
- 0x21, 0xd3, 0x1d, 0xf1, 0xdf, 0x8d, 0x39, 0x9c, 0x37, 0xc2, 0xa1, 0xba, 0x04, 0x8b, 0x97, 0x8c,
- 0x55, 0x79, 0x0c, 0x39, 0x4e, 0xdf, 0x7b, 0x3f, 0x26, 0xca, 0x5d, 0x58, 0x60, 0xff, 0xa3, 0x1c,
- 0x2c, 0x6a, 0x2f, 0x3b, 0xbd, 0xb7, 0xa5, 0x39, 0xb4, 0x0c, 0xd9, 0x7a, 0xb3, 0xdb, 0x3b, 0x6c,
- 0x1d, 0x69, 0xa5, 0x94, 0xf2, 0x0a, 0x8a, 0x62, 0x31, 0x63, 0x32, 0xb0, 0xce, 0x2d, 0xe2, 0x4e,
- 0x2d, 0xe9, 0x89, 0x24, 0xe6, 0x6b, 0x29, 0xd6, 0xb6, 0x2b, 0x81, 0x1b, 0x2a, 0xc1, 0x3c, 0x15,
- 0xf6, 0x0f, 0x96, 0x0b, 0xa0, 0xb0, 0x8c, 0x09, 0x9d, 0xb8, 0x36, 0x57, 0x7b, 0xa8, 0x04, 0xf3,
- 0x5d, 0x42, 0x39, 0x63, 0x01, 0xb3, 0x9f, 0xe8, 0x1e, 0xe4, 0xfb, 0xb6, 0x37, 0x19, 0x8f, 0x1d,
- 0x97, 0x12, 0x93, 0x13, 0x17, 0x70, 0x54, 0x84, 0xd6, 0x61, 0x51, 0x73, 0x5d, 0xc7, 0x2d, 0xcf,
- 0x73, 0x9d, 0x18, 0xa0, 0x1d, 0xc8, 0xd6, 0x2d, 0x8f, 0x1a, 0xf6, 0x80, 0x94, 0x17, 0xb8, 0x22,
- 0x18, 0x2b, 0xcf, 0x00, 0x9d, 0x10, 0xea, 0x0f, 0x31, 0xf9, 0xff, 0x09, 0xf1, 0xf8, 0x4c, 0x8e,
- 0x3d, 0xa9, 0x93, 0x4b, 0x6b, 0x40, 0x9a, 0xfe, 0x53, 0x45, 0x45, 0x4a, 0x15, 0xd6, 0x62, 0x76,
- 0xde, 0xd8, 0xb1, 0x3d, 0xc2, 0xa6, 0x32, 0xfd, 0xa9, 0xc4, 0xca, 0x83, 0xb1, 0x52, 0x83, 0xf5,
- 0x13, 0x42, 0xdb, 0xf6, 0xa4, 0x6f, 0x5b, 0x4d, 0xfb, 0xdc, 0xf1, 0x27, 0xdb, 0x81, 0xec, 0x84,
- 0x49, 0x4c, 0x72, 0xe5, 0xdb, 0xf8, 0x63, 0xe5, 0xaf, 0x0b, 0xb0, 0x91, 0x30, 0x92, 0x33, 0x75,
- 0x20, 0x6b, 0x98, 0xa3, 0x2e, 0x35, 0xa8, 0x98, 0xa9, 0x58, 0xdb, 0x8b, 0xb8, 0x78, 0xa6, 0x4d,
- 0xe5, 0xd0, 0x1c, 0x59, 0xb6, 0xe5, 0x51, 0xd7, 0xa0, 0xd6, 0x25, 0xe1, 0xb6, 0x38, 0x60, 0x41,
- 0x6d, 0xc8, 0x39, 0x63, 0xe2, 0x0a, 0x4a, 0xb1, 0x6b, 0xd5, 0x0f, 0x52, 0xb6, 0xc7, 0x84, 0xb1,
- 0x39, 0xb6, 0x31, 0x14, 0x7c, 0x21, 0x07, 0x23, 0x14, 0x01, 0xd8, 0xb4, 0x4d, 0xbe, 0x23, 0x1f,
- 0x43, 0x28, 0xe2, 0x72, 0x22, 0x48, 0x9b, 0xb6, 0x89, 0x43, 0x0e, 0xe5, 0xcf, 0x29, 0x28, 0x25,
- 0xf5, 0x08, 0x20, 0xd3, 0x6f, 0xbd, 0x68, 0xbf, 0x69, 0x95, 0xe6, 0x10, 0x82, 0x62, 0x4f, 0x6b,
- 0xe9, 0xea, 0x61, 0x57, 0xd3, 0x7b, 0xfa, 0x71, 0xfd, 0xfb, 0x52, 0x0a, 0x6d, 0x02, 0x6a, 0xf4,
- 0x5b, 0x75, 0xac, 0xd5, 0xa3, 0xf2, 0x34, 0x2a, 0xc3, 0xfa, 0x49, 0xf3, 0xe4, 0x50, 0x6d, 0xf6,
- 0x74, 0xad, 0xd7, 0xd0, 0x70, 0x4b, 0x13, 0x9a, 0x79, 0x66, 0xc1, 0x58, 0x4e, 0xe2, 0xf2, 0x85,
- 0x04, 0x7b, 0xa3, 0xfe, 0x7d, 0x69, 0x71, 0x06, 0x3b, 0x93, 0x67, 0x66, 0xb2, 0x33, 0xcd, 0x92,
- 0x72, 0x02, 0x6b, 0x33, 0xf6, 0x81, 0x11, 0x1d, 0xd6, 0x5f, 0x76, 0x7b, 0x87, 0x3d, 0x4d, 0xef,
- 0xb7, 0xea, 0xda, 0x71, 0xb3, 0xa5, 0xd5, 0x4b, 0x73, 0xec, 0xf1, 0x4e, 0xdb, 0x47, 0x2f, 0xb4,
- 0x7a, 0x29, 0xc5, 0x72, 0xb0, 0xdf, 0x92, 0xa3, 0xb4, 0x72, 0x0c, 0xa5, 0xa4, 0xf7, 0xd1, 0x16,
- 0xac, 0xb5, 0x3b, 0x1a, 0x9e, 0xa6, 0xc9, 0xc3, 0x92, 0xd6, 0x3a, 0x54, 0x4f, 0x7d, 0x9e, 0x7a,
- 0xb3, 0x2b, 0x46, 0x69, 0xe5, 0x4f, 0x29, 0x9e, 0x03, 0xed, 0x21, 0xed, 0x38, 0x2e, 0x3d, 0x72,
- 0x26, 0x36, 0x25, 0xae, 0x87, 0x36, 0x21, 0xc3, 0xb2, 0xaa, 0xe5, 0xc8, 0xa0, 0x94, 0x23, 0xa4,
- 0x42, 0x96, 0xfd, 0x62, 0xa9, 0x2b, 0xa3, 0xe4, 0x41, 0x62, 0x53, 0xe3, 0x44, 0x95, 0x8e, 0x44,
- 0xe3, 0xc0, 0x4e, 0xd1, 0x20, 0xeb, 0x4b, 0x51, 0x09, 0x96, 0xd9, 0x6f, 0xbd, 0xdf, 0x7a, 0xd1,
- 0x12, 0xbb, 0xb8, 0x01, 0xab, 0x5c, 0x12, 0x38, 0xae, 0xd5, 0x6a, 0x96, 0x52, 0x01, 0xb0, 0xd3,
- 0x6e, 0xe9, 0xed, 0xd3, 0x5e, 0x29, 0xad, 0xfc, 0x65, 0x1e, 0x76, 0xa6, 0x27, 0x0c, 0x52, 0xa4,
- 0x0c, 0x4b, 0xf4, 0x4a, 0x7d, 0x4f, 0x89, 0xc7, 0x1f, 0x61, 0x01, 0xfb, 0x43, 0xa6, 0x71, 0xa5,
- 0x26, 0x2d, 0x34, 0x72, 0x88, 0x6e, 0x41, 0x8e, 0x5e, 0x75, 0x8c, 0xc1, 0x0f, 0x84, 0x7a, 0x3c,
- 0x66, 0x17, 0x70, 0x28, 0x60, 0x5a, 0x37, 0xd0, 0x2e, 0x08, 0x6d, 0x20, 0x40, 0x0f, 0xa0, 0x48,
- 0xaf, 0x78, 0xc9, 0xf1, 0x21, 0x8b, 0x1c, 0x92, 0x90, 0x32, 0x9c, 0x1b, 0xc7, 0x65, 0x04, 0xce,
- 0x9d, 0xc2, 0xd1, 0x2b, 0x75, 0x60, 0x78, 0xd4, 0xc7, 0x2d, 0xf9, 0x7c, 0x51, 0xa9, 0xe0, 0x8b,
- 0xe1, 0xb2, 0x3e, 0x5f, 0x12, 0x47, 0xaf, 0xfa, 0x51, 0x5c, 0xce, 0xe7, 0xeb, 0x4f, 0xf1, 0xc5,
- 0x70, 0xe0, 0xf3, 0xf5, 0xa7, 0xf8, 0x5e, 0x46, 0x71, 0x79, 0x9f, 0xef, 0xe5, 0x14, 0x5f, 0x0c,
- 0xb7, 0xec, 0xf3, 0x45, 0xa5, 0x4a, 0xdd, 0x2f, 0x90, 0x1d, 0xc7, 0x6e, 0x8f, 0xa9, 0x35, 0x30,
- 0x86, 0xac, 0x34, 0xa0, 0xc7, 0xb0, 0xc8, 0x0f, 0x42, 0xbe, 0x8b, 0xf9, 0xda, 0x66, 0x45, 0x1c,
- 0x93, 0x15, 0xff, 0x98, 0xac, 0x68, 0x4c, 0x8b, 0x05, 0x48, 0xf9, 0x45, 0x1a, 0x6e, 0xcd, 0xa2,
- 0x09, 0xc2, 0xe2, 0x0b, 0x28, 0x8d, 0x9d, 0x1f, 0x89, 0x7b, 0x4c, 0x88, 0xf9, 0xda, 0x19, 0x52,
- 0xe3, 0x42, 0x54, 0xd0, 0x34, 0x9e, 0x92, 0xa3, 0x1a, 0xac, 0xbb, 0x64, 0x40, 0xac, 0x4b, 0x62,
- 0x4a, 0xaa, 0x0e, 0x83, 0xf0, 0xa8, 0x49, 0xe3, 0x99, 0x3a, 0xf4, 0x0c, 0x36, 0x47, 0xc4, 0xf0,
- 0xa7, 0x3e, 0x35, 0x26, 0xf6, 0xe0, 0x9d, 0xb0, 0x9a, 0xe7, 0x56, 0xd7, 0x68, 0xd9, 0xba, 0x86,
- 0x86, 0x47, 0x5c, 0xd5, 0x32, 0xbc, 0xa3, 0x89, 0xeb, 0x12, 0x9b, 0xf2, 0x18, 0x4b, 0xe3, 0x29,
- 0x39, 0x3b, 0xa0, 0x28, 0x19, 0xf1, 0xec, 0x9f, 0xb8, 0x84, 0xc7, 0x59, 0x1a, 0x47, 0x45, 0xca,
- 0x1f, 0x52, 0x70, 0x57, 0xb8, 0x41, 0xa3, 0xef, 0x88, 0x6b, 0x13, 0xaa, 0xba, 0x96, 0x79, 0x41,
- 0x58, 0xa6, 0x34, 0x2c, 0x8f, 0x3a, 0xee, 0x7b, 0x84, 0x21, 0x67, 0x5a, 0x2e, 0x19, 0xb0, 0x0a,
- 0x72, 0xed, 0x21, 0x72, 0xad, 0x79, 0xa5, 0xee, 0xdb, 0xe2, 0x90, 0x46, 0x39, 0x80, 0x5c, 0x20,
- 0x47, 0x05, 0xc8, 0x45, 0x8b, 0x10, 0xab, 0x5f, 0x9d, 0x6e, 0x0f, 0x6b, 0x87, 0x2f, 0x4b, 0x29,
- 0x54, 0x04, 0xa8, 0xb7, 0xdf, 0xb4, 0xe4, 0x38, 0xad, 0xfc, 0x66, 0x11, 0x1e, 0x7e, 0x60, 0xca,
- 0x60, 0x0f, 0xef, 0x00, 0x98, 0xae, 0x33, 0xd6, 0x2e, 0x89, 0x4d, 0x3d, 0x59, 0xa0, 0x22, 0x12,
- 0x56, 0xbc, 0x9c, 0x01, 0x65, 0xa1, 0x26, 0xba, 0x04, 0x39, 0x62, 0x89, 0x3f, 0x8e, 0x24, 0x77,
- 0x01, 0xfb, 0x43, 0xe6, 0xfd, 0x33, 0xd7, 0x31, 0xcc, 0x68, 0x98, 0x8a, 0x66, 0x61, 0x4a, 0xce,
- 0xb0, 0xa3, 0xc9, 0x90, 0x6d, 0x60, 0x88, 0x5d, 0x14, 0xd8, 0xa4, 0x1c, 0x3d, 0x86, 0xd5, 0x81,
- 0x3b, 0xe0, 0x79, 0x4d, 0xcc, 0x68, 0xbe, 0x17, 0xf0, 0xb4, 0x82, 0x31, 0x4f, 0x6c, 0x93, 0xb8,
- 0x9e, 0xf5, 0x33, 0x12, 0x4d, 0xfa, 0x02, 0x9e, 0x92, 0xa3, 0x47, 0xb0, 0xe2, 0x5c, 0xc6, 0xa1,
- 0x59, 0x0e, 0x4d, 0x8a, 0x19, 0x52, 0x3e, 0xe6, 0xb3, 0x3d, 0xe9, 0x96, 0x9c, 0x40, 0x26, 0xc4,
- 0x2c, 0xde, 0x7d, 0xd1, 0x7e, 0xcf, 0xa9, 0xd6, 0xbe, 0x96, 0x70, 0xe0, 0xf0, 0x99, 0x3a, 0xb4,
- 0x07, 0x1b, 0x52, 0x5e, 0xad, 0x1d, 0xf4, 0x9c, 0xda, 0xfe, 0x7e, 0x5b, 0x18, 0xe5, 0xb9, 0xd1,
- 0x6c, 0x65, 0xc4, 0xaa, 0xb6, 0xff, 0xac, 0xe7, 0xec, 0x57, 0xab, 0x72, 0xaa, 0xe5, 0x98, 0x55,
- 0x5c, 0xc9, 0x72, 0x4b, 0x2a, 0xf6, 0xab, 0xb5, 0x9e, 0x53, 0x7d, 0x5a, 0xfb, 0x4a, 0x9a, 0x15,
- 0xb8, 0xd9, 0x35, 0x5a, 0x74, 0x00, 0x5b, 0xfe, 0x32, 0x9e, 0xd6, 0xf6, 0x7a, 0x4e, 0x75, 0xbf,
- 0x7a, 0x20, 0x0d, 0x8b, 0xdc, 0xf0, 0x3a, 0xb5, 0xf2, 0x1d, 0x94, 0x44, 0x50, 0x1e, 0x93, 0x81,
- 0x9f, 0x37, 0x9f, 0x56, 0x90, 0xfe, 0x9e, 0x82, 0x72, 0x92, 0x22, 0x08, 0xe4, 0x07, 0x50, 0x1c,
- 0x38, 0x2e, 0xcb, 0x17, 0x62, 0x86, 0x47, 0x55, 0x01, 0x27, 0xa4, 0xa8, 0x02, 0x28, 0x90, 0x1c,
- 0x39, 0x26, 0x79, 0xe3, 0xb8, 0xa6, 0x1f, 0xdc, 0x33, 0x34, 0x2c, 0x41, 0xce, 0xc9, 0xa0, 0x4b,
- 0x06, 0x8e, 0x6d, 0xfa, 0xb1, 0x1e, 0x91, 0xf0, 0xda, 0xed, 0x50, 0x63, 0x18, 0x72, 0x89, 0x60,
- 0x4f, 0x48, 0x99, 0xc3, 0x27, 0xb6, 0xe4, 0x37, 0xce, 0x86, 0x24, 0xc4, 0x8b, 0x80, 0xbf, 0x46,
- 0xab, 0x9c, 0xf8, 0x7d, 0x6b, 0x78, 0x2a, 0x8b, 0x6e, 0x77, 0x0b, 0x96, 0x2c, 0x9b, 0x9e, 0xeb,
- 0xf2, 0x65, 0x61, 0x09, 0x67, 0xd8, 0xb0, 0x69, 0xa2, 0x0d, 0xc8, 0x38, 0xf6, 0x84, 0xc9, 0xd3,
- 0x5c, 0xbe, 0xe8, 0xd8, 0x93, 0xa6, 0xa9, 0xfc, 0x3a, 0x05, 0x9f, 0x33, 0xa6, 0xd1, 0xc0, 0xf2,
- 0xcb, 0xc2, 0xb1, 0x6b, 0x8c, 0x88, 0xc6, 0xca, 0x94, 0x49, 0xcc, 0xce, 0xe8, 0xa3, 0x9b, 0x76,
- 0x74, 0x2b, 0xd2, 0x69, 0x73, 0xd7, 0x35, 0xe6, 0xc2, 0x5e, 0x9b, 0xbd, 0x3c, 0xb8, 0xc4, 0x23,
- 0x94, 0x7b, 0x2b, 0x8b, 0xc5, 0x40, 0x2d, 0xc2, 0xb2, 0xe5, 0xe9, 0x13, 0xdb, 0xd2, 0x2d, 0xde,
- 0x91, 0x1f, 0xc1, 0xea, 0x09, 0xa1, 0xf8, 0x8a, 0xd7, 0xec, 0x9f, 0xfa, 0x50, 0x7f, 0x5b, 0x86,
- 0xcd, 0xa4, 0x7b, 0x64, 0x40, 0x6c, 0x27, 0xa8, 0x1a, 0x73, 0x01, 0xd9, 0x56, 0x9c, 0xac, 0x91,
- 0x92, 0x74, 0xe8, 0x21, 0x14, 0xc7, 0x8e, 0x67, 0xb1, 0x66, 0x52, 0x37, 0x5d, 0xeb, 0x5c, 0x3c,
- 0x42, 0xa6, 0x91, 0xc6, 0x05, 0x5f, 0x5e, 0x67, 0x62, 0x06, 0xb4, 0xc9, 0x85, 0x11, 0x01, 0x2e,
- 0x70, 0xe0, 0x3c, 0x2e, 0xf8, 0x72, 0x01, 0x7c, 0x0e, 0x65, 0x93, 0x0c, 0xad, 0x91, 0x45, 0x89,
- 0xab, 0x8f, 0x2c, 0xcf, 0xd3, 0x4d, 0x42, 0xe5, 0x41, 0xb1, 0xc8, 0x4d, 0x16, 0xf0, 0x66, 0x80,
- 0x78, 0x69, 0x79, 0x5e, 0xdd, 0xd7, 0xa3, 0xbb, 0x00, 0x67, 0xd6, 0x58, 0x27, 0xac, 0xb2, 0x89,
- 0x52, 0x97, 0x69, 0x2c, 0xe2, 0xdc, 0x99, 0x35, 0xe6, 0xc5, 0xce, 0x43, 0xb7, 0x81, 0x0d, 0x98,
- 0x4f, 0x65, 0x75, 0xcb, 0x34, 0x32, 0x38, 0x7b, 0x66, 0x8d, 0xfb, 0x4c, 0xc2, 0x2a, 0xc3, 0x39,
- 0x19, 0xe8, 0x41, 0x50, 0xeb, 0xde, 0xfb, 0xd1, 0x99, 0x33, 0x14, 0xd5, 0x2d, 0xd3, 0x58, 0xc2,
- 0x6b, 0xe7, 0x64, 0x70, 0xe4, 0x6b, 0xbb, 0x42, 0xc9, 0x32, 0x5c, 0x58, 0x99, 0xe4, 0x47, 0x16,
- 0x81, 0xa1, 0x3d, 0xaf, 0x75, 0x99, 0x46, 0x16, 0x6f, 0x70, 0x3b, 0xa9, 0x0f, 0x08, 0xd0, 0x77,
- 0xb0, 0x1b, 0xb7, 0x8c, 0x85, 0x34, 0x2f, 0x7d, 0x99, 0x46, 0x0e, 0x6f, 0x47, 0xad, 0xfb, 0x51,
- 0x08, 0xfa, 0x1c, 0x0a, 0x31, 0x06, 0x5e, 0xf9, 0x32, 0x0d, 0xc0, 0xcb, 0x51, 0x1b, 0xf4, 0x14,
- 0xd6, 0xe2, 0x0f, 0x26, 0x3c, 0xb0, 0xcc, 0xc1, 0x79, 0xbc, 0x1a, 0x7d, 0x2c, 0xe1, 0x8a, 0x47,
- 0xb0, 0x72, 0x75, 0x41, 0x46, 0xfa, 0x0f, 0xe4, 0xbd, 0xef, 0xcf, 0x02, 0x47, 0x2f, 0xe3, 0x02,
- 0x53, 0xbc, 0x20, 0xef, 0x43, 0x9f, 0x72, 0xe4, 0xd0, 0xf1, 0x44, 0x49, 0xcb, 0x34, 0x0a, 0x38,
- 0xcb, 0x44, 0xa7, 0x8e, 0xc7, 0x89, 0xdc, 0x2b, 0x7d, 0x3c, 0x74, 0x8c, 0x91, 0x27, 0x98, 0xca,
- 0x2b, 0x1c, 0x54, 0xc4, 0x05, 0xf7, 0xaa, 0xc3, 0xe5, 0xe2, 0x65, 0xf9, 0x4b, 0x40, 0x21, 0xd2,
- 0x76, 0x6c, 0xdd, 0x32, 0x87, 0xa4, 0x5c, 0xe2, 0xe0, 0x15, 0xbc, 0xe2, 0x83, 0x5b, 0x8e, 0xdd,
- 0x34, 0x87, 0x3c, 0x5c, 0xdd, 0x2b, 0xdd, 0x19, 0x0d, 0xac, 0xf2, 0x2a, 0xc7, 0x94, 0x70, 0xc6,
- 0xbd, 0x62, 0xd9, 0xca, 0x54, 0x54, 0xaa, 0x10, 0x57, 0xad, 0xe2, 0x0c, 0x15, 0xaa, 0xe7, 0xb0,
- 0x2d, 0xad, 0x74, 0x59, 0x77, 0xf5, 0x81, 0x3b, 0x90, 0x0b, 0x5b, 0xe3, 0x60, 0x84, 0x37, 0x04,
- 0x8f, 0x3c, 0xc4, 0x8e, 0xe4, 0x59, 0x89, 0x76, 0x21, 0xeb, 0x5e, 0xe9, 0x67, 0xbc, 0x56, 0xae,
- 0x73, 0xe8, 0x5a, 0xd8, 0xbe, 0xdf, 0x05, 0x60, 0xab, 0x97, 0xc7, 0xe1, 0x06, 0x57, 0xaf, 0x47,
- 0x7b, 0xf4, 0x5d, 0xc8, 0x52, 0xdf, 0x7a, 0x93, 0xab, 0x37, 0xc2, 0xd7, 0x82, 0xbb, 0x00, 0x34,
- 0xb4, 0xde, 0xe2, 0xea, 0xcd, 0x68, 0xff, 0xff, 0x19, 0x2c, 0x9f, 0x11, 0x57, 0x77, 0x89, 0xbc,
- 0x82, 0x28, 0x73, 0xc8, 0x16, 0xce, 0x9f, 0xb1, 0x5a, 0x20, 0x2f, 0x21, 0xee, 0x43, 0x7e, 0x38,
- 0x30, 0x2f, 0xfc, 0x0d, 0xdb, 0xe6, 0x98, 0x32, 0x06, 0x26, 0x94, 0xbb, 0xc5, 0x96, 0x69, 0x5a,
- 0x3e, 0x62, 0x87, 0x23, 0xb6, 0x71, 0xce, 0x35, 0x2d, 0x09, 0xb8, 0x03, 0x39, 0x6a, 0x8d, 0x88,
- 0x47, 0x8d, 0xd1, 0xb8, 0xbc, 0xcb, 0xb3, 0x7d, 0x07, 0x87, 0x22, 0x75, 0x19, 0xc0, 0xf2, 0x74,
- 0x59, 0x28, 0xd4, 0x3c, 0xe4, 0x2c, 0x4f, 0x17, 0xb5, 0x41, 0x5d, 0x83, 0x55, 0xcb, 0xd3, 0xe3,
- 0xf5, 0x40, 0x0a, 0xe3, 0xb9, 0xaf, 0xde, 0x86, 0x5d, 0x8b, 0x25, 0xf6, 0xec, 0x3c, 0x57, 0x57,
- 0xa0, 0x60, 0x79, 0x7a, 0x98, 0xca, 0xb2, 0x14, 0x06, 0xa9, 0xab, 0xee, 0x40, 0xd9, 0xf2, 0xf4,
- 0x99, 0xb9, 0xaa, 0xde, 0x82, 0x9d, 0x40, 0x37, 0x95, 0x91, 0xea, 0x3d, 0xb8, 0x33, 0xa5, 0x8d,
- 0x65, 0x9d, 0x8a, 0xa0, 0x94, 0x44, 0xa8, 0x65, 0xd8, 0x9c, 0x9a, 0x4f, 0xac, 0x64, 0x1d, 0x90,
- 0xe5, 0xe9, 0x89, 0x54, 0x91, 0xeb, 0x0d, 0xd2, 0x42, 0xa2, 0x12, 0x79, 0xa0, 0x6e, 0xc1, 0x46,
- 0x4c, 0xea, 0xc7, 0xbc, 0xf4, 0xb1, 0x8c, 0x53, 0x39, 0x92, 0x01, 0xad, 0xde, 0x81, 0x5b, 0xa1,
- 0x6e, 0x3a, 0x86, 0xd5, 0x02, 0xe4, 0x85, 0x9e, 0x47, 0x9a, 0x74, 0x65, 0x18, 0x99, 0x52, 0x4f,
- 0xe3, 0xfa, 0x30, 0xf6, 0xd4, 0x55, 0x58, 0x61, 0xae, 0x8e, 0xc4, 0x9a, 0x5a, 0x82, 0xa2, 0xe5,
- 0xe9, 0x91, 0xc8, 0xf2, 0x59, 0x83, 0x40, 0x92, 0x0f, 0x1c, 0x44, 0x89, 0xf2, 0xab, 0x45, 0xd8,
- 0xbd, 0xe1, 0xe0, 0x44, 0x77, 0x21, 0xcf, 0x7a, 0x66, 0x9d, 0x84, 0x6d, 0x74, 0xe6, 0x86, 0x36,
- 0x3a, 0x13, 0xb4, 0xd1, 0x9b, 0x90, 0x39, 0x67, 0x5c, 0xa2, 0xb3, 0xc8, 0x60, 0x39, 0x42, 0xff,
- 0x15, 0x69, 0xa2, 0x75, 0x89, 0xe0, 0x27, 0x0c, 0x5e, 0x09, 0xe4, 0xc7, 0x01, 0x34, 0xe8, 0x95,
- 0x7d, 0xe8, 0xa2, 0x80, 0x06, 0x72, 0x09, 0x7d, 0x0c, 0x28, 0xf0, 0x2c, 0x31, 0x7d, 0x30, 0x3f,
- 0x58, 0x70, 0x29, 0xec, 0xa1, 0x43, 0xe2, 0xa0, 0x55, 0xf6, 0xb1, 0x4b, 0x82, 0x38, 0x90, 0x4b,
- 0xe8, 0xc3, 0xb0, 0x83, 0xf6, 0x91, 0xfc, 0x8c, 0xc1, 0x45, 0x5f, 0x2c, 0x81, 0x8f, 0xa0, 0x24,
- 0xf4, 0xfa, 0xb3, 0x3d, 0x3d, 0xd2, 0x41, 0x67, 0x70, 0x51, 0xc8, 0x9f, 0xed, 0x05, 0x6d, 0xed,
- 0x96, 0x8f, 0xdc, 0xd7, 0xa9, 0xa3, 0x57, 0x6b, 0x5f, 0xeb, 0x91, 0x1e, 0x3a, 0x83, 0xd7, 0xa4,
- 0x81, 0x68, 0xa1, 0xdb, 0x7e, 0x5b, 0x5b, 0x96, 0x56, 0xd5, 0xda, 0x01, 0x33, 0xab, 0xed, 0xef,
- 0xfb, 0x66, 0xfc, 0x2c, 0xc1, 0xeb, 0x42, 0x9f, 0x68, 0xa2, 0x43, 0xbb, 0xda, 0xfe, 0x33, 0x66,
- 0xb7, 0x5f, 0xad, 0xea, 0x91, 0x3e, 0x3a, 0xb0, 0xf3, 0xdb, 0xe8, 0xb6, 0xdf, 0x0e, 0x6f, 0x4b,
- 0xbb, 0xfd, 0x6a, 0x8d, 0x2f, 0xf3, 0x69, 0xed, 0x2b, 0x3d, 0xd2, 0x49, 0x67, 0xf0, 0x86, 0x00,
- 0x04, 0x8d, 0xb4, 0xb4, 0x7c, 0x0e, 0x3b, 0xfe, 0x4a, 0x9f, 0xd6, 0xf6, 0xb8, 0xe9, 0x7e, 0xf5,
- 0x40, 0x8f, 0xf4, 0xd2, 0x19, 0xbc, 0x29, 0xd7, 0x1a, 0xb4, 0xd2, 0xc2, 0x56, 0xf9, 0x47, 0x1a,
- 0x1e, 0x7c, 0xa8, 0x95, 0x93, 0x5d, 0x90, 0x0a, 0xd9, 0xc9, 0xd8, 0xa3, 0x2e, 0x31, 0x46, 0xb2,
- 0xc9, 0x8e, 0x5e, 0x32, 0xdd, 0xc4, 0x10, 0xd8, 0xa1, 0x63, 0x00, 0xd3, 0xf9, 0xd1, 0x96, 0x2c,
- 0xe9, 0x4f, 0x62, 0x89, 0x58, 0xa2, 0x5f, 0xa6, 0xe0, 0x01, 0x4f, 0x73, 0x22, 0xc1, 0x22, 0x56,
- 0x74, 0x22, 0xe1, 0xfa, 0x78, 0xa4, 0x9f, 0x3b, 0xee, 0xc8, 0xa0, 0xf2, 0x92, 0xf3, 0x20, 0xf1,
- 0x0e, 0xfd, 0xe1, 0xe7, 0xad, 0x1c, 0x73, 0x7b, 0x7c, 0xdf, 0xb9, 0x1e, 0x2b, 0x20, 0xca, 0x53,
- 0xc8, 0x88, 0x5f, 0xfc, 0x3a, 0xb2, 0xd1, 0xc4, 0xbd, 0xb7, 0x7a, 0xef, 0x4d, 0x5b, 0x57, 0x9b,
- 0x3d, 0x71, 0x01, 0xda, 0x6d, 0x7e, 0xdf, 0x7b, 0xab, 0x1f, 0xb7, 0xfb, 0x98, 0xcb, 0x52, 0xca,
- 0x6f, 0xc5, 0xfd, 0x5e, 0xd0, 0xb2, 0x4a, 0x17, 0x7f, 0x62, 0xcf, 0xca, 0x72, 0xde, 0xa3, 0x06,
- 0x9d, 0x88, 0x9c, 0xcf, 0x61, 0x39, 0x62, 0x45, 0xe4, 0xdc, 0xb0, 0x86, 0xba, 0x4b, 0x0c, 0xcf,
- 0xb1, 0x79, 0xba, 0xe7, 0x30, 0x30, 0x11, 0xe6, 0x12, 0xb4, 0xcd, 0x0f, 0x6c, 0x7e, 0xb5, 0xc2,
- 0x33, 0x3c, 0xc5, 0x8e, 0x6b, 0xbe, 0x16, 0xe5, 0xe7, 0x8b, 0xb0, 0x72, 0x42, 0x28, 0xbf, 0xf1,
- 0xf7, 0x7b, 0xe9, 0x6f, 0x12, 0x57, 0xe8, 0xf9, 0xda, 0xed, 0xb8, 0x3f, 0x13, 0x97, 0xf5, 0xac,
- 0x87, 0xf7, 0x0d, 0xd0, 0x37, 0xb0, 0x34, 0x11, 0x17, 0xca, 0x72, 0xc3, 0xef, 0x5e, 0x7f, 0xe1,
- 0xec, 0x5b, 0xfb, 0x16, 0xe8, 0x10, 0xf2, 0x8e, 0xb8, 0x4a, 0xe4, 0x04, 0xf3, 0xb3, 0x26, 0x4f,
- 0xdc, 0x35, 0x36, 0xe6, 0x70, 0xd4, 0x06, 0x35, 0xa1, 0xe8, 0xd8, 0x93, 0xc8, 0xad, 0x13, 0xf7,
- 0xc7, 0xac, 0x65, 0xc4, 0x2f, 0xa7, 0x1a, 0x73, 0x38, 0x61, 0x88, 0x30, 0x14, 0x08, 0x7d, 0x17,
- 0x5e, 0x81, 0x70, 0xdf, 0xe5, 0x6b, 0x5f, 0x7c, 0xfc, 0x05, 0x4d, 0x63, 0x0e, 0xc7, 0x29, 0xd0,
- 0xff, 0xf0, 0xb7, 0x42, 0xa9, 0xe6, 0x15, 0x34, 0x5f, 0xdb, 0x9d, 0x22, 0x0c, 0x5f, 0x53, 0x1b,
- 0x73, 0x38, 0x62, 0x80, 0x54, 0x00, 0x87, 0xaf, 0x9c, 0x3f, 0xd9, 0x12, 0x37, 0xbf, 0x37, 0x65,
- 0x9e, 0x78, 0xe3, 0x63, 0x1c, 0xa1, 0x15, 0x3a, 0x85, 0x25, 0x16, 0x4f, 0x8c, 0x20, 0xcb, 0x09,
- 0x9e, 0x7e, 0x42, 0xb6, 0x04, 0x5b, 0x26, 0x29, 0xd0, 0x01, 0xf8, 0xb1, 0xc4, 0xeb, 0x71, 0xbe,
- 0x76, 0x2b, 0xce, 0x16, 0x7f, 0x4f, 0x63, 0x96, 0x12, 0xae, 0xe6, 0x60, 0xc9, 0x15, 0x52, 0xe5,
- 0x77, 0x59, 0xfe, 0x8e, 0x2f, 0xa3, 0x50, 0xa6, 0xc7, 0xf3, 0x20, 0xdc, 0xc5, 0xc5, 0x98, 0x12,
- 0x27, 0x8e, 0x81, 0x2b, 0x5d, 0x8e, 0x0c, 0x52, 0x42, 0x83, 0x1c, 0x71, 0x5d, 0x11, 0xfe, 0xf2,
- 0x8e, 0xfc, 0xe1, 0x4d, 0xe6, 0xfc, 0x00, 0x13, 0x70, 0x1c, 0x5a, 0xa2, 0x6f, 0x23, 0x99, 0x20,
- 0x82, 0xf1, 0xce, 0x75, 0x99, 0x20, 0x88, 0x62, 0xa9, 0xf0, 0x6d, 0x98, 0x0a, 0x0b, 0xd7, 0xec,
- 0x54, 0xe2, 0xdb, 0x4b, 0x34, 0x17, 0x5e, 0xc0, 0xf2, 0x58, 0xc4, 0x39, 0xb5, 0x89, 0xeb, 0xc9,
- 0xe0, 0xfb, 0xfc, 0xc6, 0x64, 0x88, 0xf0, 0xc4, 0x8c, 0xd1, 0xab, 0xa9, 0xac, 0x10, 0xa1, 0xf7,
- 0xf0, 0x03, 0x59, 0x11, 0x21, 0x4c, 0x66, 0xc7, 0x19, 0xac, 0xc6, 0x42, 0x3b, 0x12, 0x91, 0xb5,
- 0x8f, 0xcf, 0x90, 0xc8, 0x04, 0xd3, 0x74, 0x48, 0x8b, 0x65, 0x8b, 0x88, 0xd6, 0xcf, 0x6e, 0xc8,
- 0x96, 0x08, 0x5b, 0x34, 0x6b, 0x5e, 0xf0, 0xa7, 0xef, 0x38, 0xb6, 0xef, 0x27, 0x19, 0xaa, 0xf7,
- 0x6f, 0xc8, 0x9c, 0xd8, 0x73, 0x47, 0x4c, 0x51, 0x9f, 0x5f, 0x72, 0x04, 0x4c, 0xc0, 0x99, 0xaa,
- 0x9f, 0x7c, 0xe0, 0xf0, 0xba, 0x15, 0xf2, 0xa0, 0xff, 0x0e, 0xf3, 0x28, 0x3f, 0xab, 0xec, 0x25,
- 0x0e, 0x8f, 0x48, 0x22, 0x29, 0x55, 0xc8, 0x88, 0xf0, 0x47, 0xeb, 0x50, 0xea, 0xf6, 0x0e, 0x7b,
- 0xfd, 0x6e, 0xec, 0xcb, 0x53, 0x06, 0xd2, 0xed, 0x17, 0xa5, 0x14, 0xff, 0x96, 0x8c, 0x71, 0x1b,
- 0x97, 0xd2, 0xca, 0xef, 0x53, 0x90, 0x8f, 0xc4, 0x3c, 0x33, 0xc4, 0xda, 0x61, 0xb7, 0xdd, 0x8a,
- 0x19, 0xae, 0x40, 0xbe, 0xdf, 0xea, 0xf6, 0x3b, 0x9d, 0x36, 0xee, 0xf1, 0xcf, 0x56, 0x1b, 0xb0,
- 0xda, 0x6c, 0xbd, 0x3e, 0x3c, 0x6d, 0xd6, 0xf5, 0xba, 0xf6, 0xba, 0x79, 0xa4, 0xe9, 0xcd, 0x7a,
- 0x29, 0x1d, 0x15, 0x33, 0xa8, 0xde, 0x7b, 0xdb, 0xd1, 0x4a, 0xf3, 0x28, 0x0f, 0x4b, 0xbd, 0xe6,
- 0x4b, 0xad, 0xdd, 0xef, 0x95, 0x16, 0xd8, 0x0c, 0x3e, 0x06, 0x6b, 0xaf, 0x04, 0x64, 0x91, 0x9d,
- 0x96, 0xcd, 0x56, 0x4f, 0xc3, 0xad, 0xc3, 0x53, 0x5d, 0xac, 0x2d, 0x23, 0x64, 0xd1, 0x49, 0x4a,
- 0x4b, 0x2a, 0x40, 0xd6, 0x95, 0x4f, 0xae, 0xbc, 0x86, 0x95, 0x6e, 0xe2, 0xc4, 0x4a, 0x7e, 0x7d,
- 0x4f, 0x7d, 0xf4, 0xd7, 0xf7, 0x48, 0x11, 0xfa, 0x67, 0x0a, 0x4a, 0xdd, 0x4f, 0x29, 0x42, 0xdd,
- 0x7f, 0xaf, 0x08, 0x75, 0x3f, 0xae, 0x08, 0xfd, 0x94, 0xed, 0xdd, 0xfb, 0x29, 0xbb, 0xab, 0x58,
- 0xb0, 0xd1, 0xb5, 0xec, 0x8b, 0x21, 0x49, 0x36, 0x04, 0x3b, 0x90, 0xa5, 0x86, 0x7b, 0x41, 0x68,
- 0x70, 0xa9, 0x17, 0x8c, 0xd1, 0x5e, 0xe0, 0x40, 0x79, 0xde, 0xef, 0xcc, 0xac, 0xb3, 0x1c, 0x81,
- 0x03, 0x5f, 0xbf, 0x82, 0xcd, 0xe4, 0x54, 0xd2, 0xe1, 0x5f, 0x87, 0x3b, 0x2d, 0xb7, 0x71, 0xf7,
- 0x86, 0xc2, 0x8d, 0xc3, 0xb0, 0x08, 0x56, 0xdf, 0xfd, 0x4f, 0xad, 0xbe, 0xfb, 0xc1, 0xd5, 0x77,
- 0x3f, 0x6d, 0xf5, 0xdd, 0x6b, 0x57, 0x5f, 0xfb, 0x63, 0x0a, 0x72, 0x9a, 0x0f, 0x44, 0x18, 0xf2,
- 0x27, 0x84, 0x6a, 0x57, 0x02, 0x8e, 0xa2, 0xe7, 0xc6, 0xcc, 0x1d, 0xda, 0xb9, 0x7f, 0x03, 0x42,
- 0x2e, 0x0d, 0x43, 0xbe, 0x7b, 0x23, 0x67, 0xf7, 0x83, 0x9c, 0xc9, 0xf5, 0xab, 0x18, 0x6e, 0x3b,
- 0xee, 0x45, 0xc5, 0x19, 0x13, 0x7b, 0xe0, 0xb8, 0x66, 0x45, 0xfc, 0x75, 0x4d, 0x68, 0xf7, 0xbf,
- 0xd5, 0x0b, 0x8b, 0xbe, 0x9b, 0x9c, 0x55, 0x06, 0xce, 0xe8, 0x89, 0x8f, 0x7a, 0x22, 0x50, 0x5f,
- 0xca, 0xbf, 0xc1, 0xb9, 0xdc, 0x7f, 0x72, 0xe1, 0x84, 0x7f, 0xb4, 0x73, 0x96, 0xe1, 0xf2, 0xaf,
- 0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x86, 0xb9, 0xfc, 0x29, 0xd6, 0x23, 0x00, 0x00,
+ // 3562 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4f, 0x73, 0xdb, 0xc8,
+ 0x72, 0x17, 0x29, 0x89, 0x22, 0x9b, 0xa2, 0x44, 0x8d, 0xfe, 0xd1, 0x92, 0xff, 0x2d, 0x5e, 0xad,
+ 0xed, 0xf7, 0xca, 0x8f, 0x36, 0xb9, 0x92, 0x57, 0xd9, 0xf7, 0x52, 0xf5, 0x48, 0x91, 0x12, 0x19,
+ 0xcb, 0xa4, 0x76, 0x48, 0x7a, 0x77, 0x53, 0x95, 0x42, 0x41, 0xc4, 0x48, 0x46, 0x99, 0x04, 0x18,
+ 0x60, 0xe8, 0xa5, 0x73, 0xce, 0x2d, 0xc9, 0xe9, 0x5d, 0xf2, 0x25, 0x52, 0x39, 0xe4, 0x90, 0x4b,
+ 0x4e, 0x39, 0xe7, 0x4b, 0xa4, 0x2a, 0x5f, 0x20, 0xa9, 0x9c, 0x53, 0xa9, 0xf9, 0x07, 0x0c, 0x40,
+ 0x4a, 0xb6, 0x37, 0xb9, 0xd8, 0x9a, 0xee, 0x5f, 0xff, 0xa6, 0x31, 0xd3, 0xdd, 0xd3, 0x18, 0x10,
+ 0x1e, 0x7e, 0xf0, 0x46, 0xf4, 0x9d, 0x65, 0x4e, 0x7c, 0x8f, 0x7a, 0xc1, 0x0b, 0x32, 0xa3, 0xc4,
+ 0x0d, 0x1c, 0xcf, 0x0d, 0xca, 0x5c, 0x82, 0x72, 0xa1, 0xe4, 0x60, 0x1e, 0x6a, 0x0e, 0x3d, 0xf7,
+ 0xda, 0xb9, 0x11, 0xd0, 0x83, 0xc3, 0x1b, 0xcf, 0xbb, 0x19, 0x91, 0x17, 0x7c, 0x74, 0x35, 0xbd,
+ 0x7e, 0x41, 0xc6, 0x13, 0xfa, 0x51, 0x28, 0x8d, 0xbf, 0x80, 0xec, 0x5b, 0x6b, 0x34, 0x25, 0x3d,
+ 0x42, 0xd1, 0x06, 0xa4, 0x1d, 0xbb, 0x94, 0x7a, 0x9c, 0x7a, 0x96, 0xc3, 0x69, 0xc7, 0x46, 0x27,
+ 0xb0, 0x6e, 0x8d, 0x2c, 0x7f, 0x2c, 0xe9, 0x4a, 0xe9, 0xc7, 0xa9, 0x67, 0xf9, 0xea, 0x76, 0x59,
+ 0xb2, 0xd7, 0x98, 0xee, 0x94, 0xff, 0xdd, 0x5a, 0xc2, 0x79, 0x2b, 0x1a, 0xd6, 0xd7, 0x60, 0xf5,
+ 0x03, 0x63, 0x35, 0x9e, 0x43, 0x8e, 0xd3, 0xf7, 0x3f, 0x4e, 0x88, 0xf1, 0x08, 0x56, 0xd8, 0xff,
+ 0x28, 0x07, 0xab, 0xcd, 0x37, 0x97, 0xfd, 0x9f, 0x8a, 0x4b, 0x68, 0x1d, 0xb2, 0x8d, 0x76, 0xaf,
+ 0x5f, 0xeb, 0x9c, 0x36, 0x8b, 0x29, 0xe3, 0x7b, 0xd8, 0x10, 0xce, 0x4c, 0xc8, 0xd0, 0xb9, 0x76,
+ 0x88, 0x3f, 0xe7, 0xd2, 0x0b, 0x49, 0xcc, 0x7d, 0xd9, 0xa8, 0xde, 0x2b, 0x87, 0xcb, 0x50, 0x0e,
+ 0xe7, 0x29, 0xb3, 0x7f, 0xb0, 0x74, 0x80, 0xc2, 0x3a, 0x26, 0x74, 0xea, 0xbb, 0x5c, 0x1d, 0xa0,
+ 0x22, 0x2c, 0xf7, 0x08, 0xe5, 0x8c, 0x05, 0xcc, 0xfe, 0x44, 0x8f, 0x21, 0x3f, 0x70, 0x83, 0xe9,
+ 0x64, 0xe2, 0xf9, 0x94, 0xd8, 0x9c, 0xb8, 0x80, 0x75, 0x11, 0xda, 0x81, 0xd5, 0xa6, 0xef, 0x7b,
+ 0x7e, 0x69, 0x99, 0xeb, 0xc4, 0x00, 0x1d, 0x40, 0xb6, 0xe1, 0x04, 0xd4, 0x72, 0x87, 0xa4, 0xb4,
+ 0xc2, 0x15, 0xe1, 0xd8, 0x78, 0x05, 0xe8, 0x9c, 0x50, 0x35, 0xc4, 0xe4, 0x2f, 0xa7, 0x24, 0xe0,
+ 0x33, 0x79, 0xee, 0xb4, 0x41, 0x3e, 0x38, 0x43, 0xd2, 0x56, 0x4f, 0xa5, 0x8b, 0x8c, 0x0a, 0x6c,
+ 0xc7, 0xec, 0x82, 0x89, 0xe7, 0x06, 0x84, 0x4d, 0x65, 0xab, 0xa9, 0x84, 0xe7, 0xe1, 0xd8, 0xa8,
+ 0xc2, 0xce, 0x39, 0xa1, 0x5d, 0x77, 0x3a, 0x70, 0x9d, 0xb6, 0x7b, 0xed, 0xa9, 0xc9, 0x0e, 0x20,
+ 0x3b, 0x65, 0x12, 0x9b, 0xcc, 0x94, 0x8d, 0x1a, 0x1b, 0xff, 0xbe, 0x02, 0xbb, 0x09, 0x23, 0x39,
+ 0xd3, 0x25, 0x64, 0x2d, 0x7b, 0xdc, 0xa3, 0x16, 0x15, 0x33, 0x6d, 0x54, 0x8f, 0xb4, 0x25, 0x5e,
+ 0x68, 0x53, 0xae, 0xd9, 0x63, 0xc7, 0x75, 0x02, 0xea, 0x5b, 0xd4, 0xf9, 0x40, 0xb8, 0x2d, 0x0e,
+ 0x59, 0x50, 0x17, 0x72, 0xde, 0x84, 0xf8, 0x82, 0x52, 0xec, 0x5a, 0xe5, 0x93, 0x94, 0xdd, 0x09,
+ 0x61, 0x6c, 0x9e, 0x6b, 0x8d, 0x04, 0x5f, 0xc4, 0xc1, 0x08, 0x45, 0x00, 0xb6, 0x5d, 0x9b, 0xef,
+ 0xc8, 0xe7, 0x10, 0x8a, 0xb8, 0x9c, 0x0a, 0xd2, 0xb6, 0x6b, 0xe3, 0x88, 0xc3, 0xf8, 0xd7, 0x14,
+ 0x14, 0x93, 0x7a, 0x04, 0x90, 0x19, 0x74, 0x5e, 0x77, 0x7f, 0xe8, 0x14, 0x97, 0x10, 0x82, 0x8d,
+ 0x7e, 0xb3, 0x63, 0xd6, 0x6b, 0xbd, 0xa6, 0xd9, 0x37, 0xcf, 0x1a, 0x3f, 0x16, 0x53, 0x68, 0x0f,
+ 0x50, 0x6b, 0xd0, 0x69, 0xe0, 0x66, 0x43, 0x97, 0xa7, 0x51, 0x09, 0x76, 0xce, 0xdb, 0xe7, 0xb5,
+ 0x7a, 0xbb, 0x6f, 0x36, 0xfb, 0xad, 0x26, 0xee, 0x34, 0x85, 0x66, 0x99, 0x59, 0x30, 0x96, 0xf3,
+ 0xb8, 0x7c, 0x25, 0xc1, 0xde, 0x6a, 0xfc, 0x58, 0x5c, 0x5d, 0xc0, 0xce, 0xe4, 0x99, 0x85, 0xec,
+ 0x4c, 0xb3, 0x66, 0x9c, 0xc3, 0xf6, 0x82, 0x7d, 0x60, 0x44, 0xb5, 0xc6, 0x9b, 0x5e, 0xbf, 0xd6,
+ 0x6f, 0x9a, 0x83, 0x4e, 0xa3, 0x79, 0xd6, 0xee, 0x34, 0x1b, 0xc5, 0x25, 0xf6, 0x78, 0x17, 0xdd,
+ 0xd3, 0xd7, 0xcd, 0x46, 0x31, 0xc5, 0x72, 0x70, 0xd0, 0x91, 0xa3, 0xb4, 0x71, 0x06, 0xc5, 0xe4,
+ 0xea, 0xa3, 0x7d, 0xd8, 0xee, 0x5e, 0x36, 0xf1, 0x3c, 0x4d, 0x1e, 0xd6, 0x9a, 0x9d, 0x5a, 0xfd,
+ 0x42, 0xf1, 0x34, 0xda, 0x3d, 0x31, 0x4a, 0x1b, 0xff, 0x9c, 0xe2, 0x39, 0xd0, 0x1d, 0xd1, 0x4b,
+ 0xcf, 0xa7, 0xa7, 0xde, 0xd4, 0xa5, 0xc4, 0x0f, 0xd0, 0x1e, 0x64, 0x58, 0x56, 0x75, 0x3c, 0x19,
+ 0x94, 0x72, 0x84, 0xea, 0x90, 0x65, 0x7f, 0xb1, 0xd4, 0x95, 0x51, 0xf2, 0x24, 0xb1, 0xa9, 0x71,
+ 0xa2, 0xf2, 0xa5, 0x44, 0xe3, 0xd0, 0xce, 0x68, 0x42, 0x56, 0x49, 0x51, 0x11, 0xd6, 0xd9, 0xdf,
+ 0xe6, 0xa0, 0xf3, 0xba, 0x23, 0x76, 0x71, 0x17, 0xb6, 0xb8, 0x24, 0x5c, 0xb8, 0x4e, 0xa7, 0x5d,
+ 0x4c, 0x85, 0xc0, 0xcb, 0x6e, 0xc7, 0xec, 0x5e, 0xf4, 0x8b, 0x69, 0xe3, 0xdf, 0x96, 0xe1, 0x60,
+ 0x7e, 0xc2, 0x30, 0x45, 0x4a, 0xb0, 0x46, 0x67, 0xf5, 0x8f, 0x94, 0x04, 0xfc, 0x11, 0x56, 0xb0,
+ 0x1a, 0x32, 0x8d, 0x2f, 0x35, 0x69, 0xa1, 0x91, 0x43, 0x74, 0x1f, 0x72, 0x74, 0x76, 0x69, 0x0d,
+ 0xdf, 0x13, 0x1a, 0xf0, 0x98, 0x5d, 0xc1, 0x91, 0x80, 0x69, 0xfd, 0x50, 0xbb, 0x22, 0xb4, 0xa1,
+ 0x00, 0x3d, 0x81, 0x0d, 0x3a, 0xe3, 0x25, 0x47, 0x41, 0x56, 0x39, 0x24, 0x21, 0x65, 0x38, 0x3f,
+ 0x8e, 0xcb, 0x08, 0x9c, 0x3f, 0x87, 0xa3, 0xb3, 0xfa, 0xd0, 0x0a, 0xa8, 0xc2, 0xad, 0x29, 0x3e,
+ 0x5d, 0x2a, 0xf8, 0x62, 0xb8, 0xac, 0xe2, 0x4b, 0xe2, 0xe8, 0x6c, 0xa0, 0xe3, 0x72, 0x8a, 0x6f,
+ 0x30, 0xc7, 0x17, 0xc3, 0x81, 0xe2, 0x1b, 0xcc, 0xf1, 0xbd, 0xd1, 0x71, 0x79, 0xc5, 0xf7, 0x66,
+ 0x8e, 0x2f, 0x86, 0x5b, 0x57, 0x7c, 0xba, 0xd4, 0x68, 0xa8, 0x02, 0x79, 0xe9, 0xb9, 0xdd, 0x09,
+ 0x75, 0x86, 0xd6, 0x88, 0x95, 0x06, 0xf4, 0x1c, 0x56, 0xf9, 0x41, 0xc8, 0x77, 0x31, 0x5f, 0xdd,
+ 0x2b, 0x8b, 0x63, 0xb2, 0xac, 0x8e, 0xc9, 0x72, 0x93, 0x69, 0xb1, 0x00, 0x19, 0x7f, 0x9d, 0x86,
+ 0xfb, 0x8b, 0x68, 0xc2, 0xb0, 0xf8, 0x0d, 0x14, 0x27, 0xde, 0xcf, 0xc4, 0x3f, 0x23, 0xc4, 0x7e,
+ 0xeb, 0x8d, 0xa8, 0x75, 0x23, 0x2a, 0x68, 0x1a, 0xcf, 0xc9, 0x51, 0x15, 0x76, 0x7c, 0x32, 0x24,
+ 0xce, 0x07, 0x62, 0x4b, 0xaa, 0x4b, 0x06, 0xe1, 0x51, 0x93, 0xc6, 0x0b, 0x75, 0xe8, 0x15, 0xec,
+ 0x8d, 0x89, 0xa5, 0xa6, 0xbe, 0xb0, 0xa6, 0xee, 0xf0, 0x9d, 0xb0, 0x5a, 0xe6, 0x56, 0xb7, 0x68,
+ 0x99, 0x5f, 0x23, 0x2b, 0x20, 0x7e, 0xdd, 0xb1, 0x82, 0xd3, 0xa9, 0xef, 0x13, 0x97, 0xf2, 0x18,
+ 0x4b, 0xe3, 0x39, 0x39, 0x3b, 0xa0, 0x28, 0x19, 0xf3, 0xec, 0x9f, 0xfa, 0x84, 0xc7, 0x59, 0x1a,
+ 0xeb, 0x22, 0xe3, 0x1f, 0x53, 0xf0, 0x48, 0x2c, 0x43, 0x93, 0xbe, 0x23, 0xbe, 0x4b, 0x68, 0xdd,
+ 0x77, 0xec, 0x1b, 0xc2, 0x32, 0xa5, 0xe5, 0x04, 0xd4, 0xf3, 0x3f, 0x22, 0x0c, 0x39, 0xdb, 0xf1,
+ 0xc9, 0x90, 0x55, 0x90, 0x5b, 0x0f, 0x91, 0x5b, 0xcd, 0xcb, 0x0d, 0x65, 0x8b, 0x23, 0x1a, 0xe3,
+ 0x04, 0x72, 0xa1, 0x1c, 0x15, 0x20, 0xa7, 0x17, 0x21, 0x56, 0xbf, 0x2e, 0x7b, 0x7d, 0xdc, 0xac,
+ 0xbd, 0x29, 0xa6, 0xd0, 0x06, 0x40, 0xa3, 0xfb, 0x43, 0x47, 0x8e, 0xd3, 0xc6, 0x1f, 0x57, 0xe1,
+ 0xe9, 0x27, 0xa6, 0x0c, 0xf7, 0xf0, 0x21, 0x80, 0xed, 0x7b, 0x93, 0xe6, 0x07, 0xe2, 0xd2, 0x40,
+ 0x16, 0x28, 0x4d, 0xc2, 0x8a, 0x97, 0x37, 0xa4, 0x2c, 0xd4, 0x44, 0x97, 0x20, 0x47, 0x2c, 0xf1,
+ 0x27, 0x5a, 0x72, 0x17, 0xb0, 0x1a, 0xb2, 0xd5, 0xbf, 0xf2, 0x3d, 0xcb, 0xd6, 0xc3, 0x54, 0x34,
+ 0x0b, 0x73, 0x72, 0x86, 0x1d, 0x4f, 0x47, 0x6c, 0x03, 0x23, 0xec, 0xaa, 0xc0, 0x26, 0xe5, 0xe8,
+ 0x39, 0x6c, 0x0d, 0xfd, 0x21, 0xcf, 0x6b, 0x62, 0xeb, 0xf9, 0x5e, 0xc0, 0xf3, 0x0a, 0xc6, 0x3c,
+ 0x75, 0x6d, 0xe2, 0x07, 0xce, 0x5f, 0x11, 0x3d, 0xe9, 0x0b, 0x78, 0x4e, 0x8e, 0x9e, 0xc1, 0xa6,
+ 0xf7, 0x21, 0x0e, 0xcd, 0x72, 0x68, 0x52, 0xcc, 0x90, 0xf2, 0x31, 0x5f, 0x1d, 0xc9, 0x65, 0xc9,
+ 0x09, 0x64, 0x42, 0xcc, 0xe2, 0x5d, 0x89, 0x8e, 0xfb, 0x5e, 0xa5, 0xfa, 0xad, 0x84, 0x03, 0x87,
+ 0x2f, 0xd4, 0xa1, 0x23, 0xd8, 0x95, 0xf2, 0x4a, 0xf5, 0xa4, 0xef, 0x55, 0x8f, 0x8f, 0xbb, 0xc2,
+ 0x28, 0xcf, 0x8d, 0x16, 0x2b, 0x35, 0xab, 0xea, 0xf1, 0xab, 0xbe, 0x77, 0x5c, 0xa9, 0xc8, 0xa9,
+ 0xd6, 0x63, 0x56, 0x71, 0x25, 0xcb, 0x2d, 0xa9, 0x38, 0xae, 0x54, 0xfb, 0x5e, 0xe5, 0x65, 0xf5,
+ 0x1b, 0x69, 0x56, 0xe0, 0x66, 0xb7, 0x68, 0xd1, 0x09, 0xec, 0x2b, 0x37, 0x5e, 0x56, 0x8f, 0xfa,
+ 0x5e, 0xe5, 0xb8, 0x72, 0x22, 0x0d, 0x37, 0xb8, 0xe1, 0x6d, 0x6a, 0xe3, 0x0f, 0x50, 0x14, 0x41,
+ 0x79, 0x46, 0x86, 0x2a, 0x6f, 0xbe, 0xac, 0x20, 0xfd, 0x57, 0x0a, 0x4a, 0x49, 0x8a, 0x30, 0x90,
+ 0x9f, 0xc0, 0xc6, 0xd0, 0xf3, 0x59, 0xbe, 0x10, 0x3b, 0x3a, 0xaa, 0x0a, 0x38, 0x21, 0x45, 0x65,
+ 0x40, 0xa1, 0xe4, 0xd4, 0xb3, 0xc9, 0x0f, 0x9e, 0x6f, 0xab, 0xe0, 0x5e, 0xa0, 0x61, 0x09, 0x72,
+ 0x4d, 0x86, 0x3d, 0x32, 0xf4, 0x5c, 0x5b, 0xc5, 0xba, 0x26, 0xe1, 0xb5, 0xdb, 0xa3, 0xd6, 0x28,
+ 0xe2, 0x12, 0xc1, 0x9e, 0x90, 0xb2, 0x05, 0x9f, 0xba, 0x92, 0xdf, 0xba, 0x1a, 0x91, 0x08, 0x2f,
+ 0x02, 0xfe, 0x16, 0xad, 0x71, 0xae, 0xfa, 0xd6, 0xe8, 0x54, 0x16, 0xdd, 0xee, 0x3e, 0xac, 0x39,
+ 0x2e, 0xbd, 0x36, 0xe5, 0xcb, 0xc2, 0x1a, 0xce, 0xb0, 0x61, 0xdb, 0x46, 0xbb, 0x90, 0xf1, 0xdc,
+ 0x29, 0x93, 0xa7, 0xb9, 0x7c, 0xd5, 0x73, 0xa7, 0x6d, 0xdb, 0xf8, 0xbb, 0x14, 0x7c, 0xcd, 0x98,
+ 0xc6, 0x43, 0x47, 0x95, 0x85, 0x33, 0xdf, 0x1a, 0x93, 0x26, 0x2b, 0x53, 0x36, 0xb1, 0x2f, 0xc7,
+ 0x9f, 0xdd, 0xb4, 0xa3, 0xfb, 0x5a, 0xa7, 0xcd, 0x97, 0xae, 0xb5, 0x14, 0xf5, 0xda, 0xec, 0xe5,
+ 0xc1, 0x27, 0x01, 0xa1, 0x7c, 0xb5, 0xb2, 0x58, 0x0c, 0xea, 0x1b, 0xb0, 0xee, 0x04, 0xe6, 0xd4,
+ 0x75, 0x4c, 0x87, 0x77, 0xe4, 0xa7, 0xb0, 0x75, 0x4e, 0x28, 0x9e, 0xf1, 0x9a, 0xfd, 0x4b, 0x1f,
+ 0xea, 0x42, 0x9c, 0x74, 0xa3, 0x24, 0xcf, 0x03, 0x00, 0xd6, 0x23, 0x99, 0x23, 0xeb, 0x8a, 0x8c,
+ 0xe4, 0x13, 0xe4, 0x98, 0xe4, 0x82, 0x09, 0x14, 0x5b, 0xe0, 0x72, 0xb6, 0x1c, 0x67, 0xeb, 0xb9,
+ 0xc6, 0x7f, 0xac, 0xc3, 0x5e, 0x72, 0xb1, 0x65, 0x78, 0xdd, 0x4b, 0x38, 0xd6, 0x5a, 0x0a, 0x5d,
+ 0xdb, 0x8f, 0xbb, 0xd6, 0x4a, 0x49, 0xe7, 0xd0, 0x53, 0xd8, 0x98, 0x78, 0x81, 0xc3, 0x5a, 0x53,
+ 0xd3, 0xf6, 0x9d, 0x6b, 0xb1, 0x20, 0x99, 0x56, 0x1a, 0x17, 0x94, 0xbc, 0xc1, 0xc4, 0x0c, 0xe8,
+ 0x92, 0x1b, 0x4b, 0x03, 0xae, 0x70, 0xe0, 0x32, 0x2e, 0x28, 0xb9, 0x00, 0x7e, 0x07, 0x25, 0x9b,
+ 0x8c, 0x9c, 0xb1, 0x43, 0x89, 0x6f, 0x8e, 0x9d, 0x20, 0x30, 0x6d, 0x42, 0xe5, 0xb1, 0xb3, 0xca,
+ 0x4d, 0x56, 0xf0, 0x5e, 0x88, 0x78, 0xe3, 0x04, 0x41, 0x43, 0xe9, 0xd1, 0x23, 0x80, 0x2b, 0x67,
+ 0x62, 0x12, 0x56, 0x27, 0x45, 0xe1, 0xcc, 0xb4, 0x56, 0x71, 0xee, 0xca, 0x99, 0xf0, 0xd2, 0x19,
+ 0xa0, 0x07, 0xc0, 0x06, 0x6c, 0x87, 0x64, 0xad, 0xcc, 0xb4, 0x32, 0x38, 0x7b, 0xe5, 0x4c, 0x06,
+ 0x4c, 0xc2, 0xea, 0xcc, 0x35, 0x19, 0x9a, 0x61, 0x8a, 0x98, 0xc1, 0xc7, 0xf1, 0x95, 0x37, 0x12,
+ 0xb5, 0x32, 0xd3, 0x5a, 0xc3, 0xdb, 0xd7, 0x64, 0x78, 0xaa, 0xb4, 0x3d, 0xa1, 0x64, 0xf5, 0x42,
+ 0x58, 0xd9, 0xe4, 0x67, 0x16, 0xcf, 0x91, 0x3d, 0xaf, 0x9c, 0x99, 0x56, 0x16, 0xef, 0x72, 0x3b,
+ 0xa9, 0x0f, 0x09, 0xd0, 0x1f, 0xe0, 0x30, 0x6e, 0x19, 0x4b, 0x10, 0x5e, 0x48, 0x33, 0xad, 0x1c,
+ 0xbe, 0xa7, 0x5b, 0x0f, 0x74, 0x08, 0xfa, 0x1a, 0x0a, 0x31, 0x06, 0x5e, 0x47, 0x33, 0x2d, 0xc0,
+ 0xeb, 0xba, 0x0d, 0x7a, 0x09, 0xdb, 0xf1, 0x07, 0x13, 0x2b, 0xb0, 0xce, 0xc1, 0x79, 0xbc, 0xa5,
+ 0x3f, 0x96, 0x58, 0x8a, 0x67, 0xb0, 0x39, 0xbb, 0x21, 0x63, 0xf3, 0x3d, 0xf9, 0xa8, 0xd6, 0xb3,
+ 0xc0, 0xd1, 0xeb, 0xb8, 0xc0, 0x14, 0xaf, 0xc9, 0xc7, 0x68, 0x4d, 0x39, 0x72, 0xe4, 0x05, 0xa2,
+ 0x40, 0x66, 0x5a, 0x05, 0x9c, 0x65, 0xa2, 0x0b, 0x2f, 0xe0, 0x44, 0xfe, 0xcc, 0x9c, 0x8c, 0x3c,
+ 0x6b, 0x1c, 0x08, 0xa6, 0xd2, 0x26, 0x07, 0x6d, 0xe0, 0x82, 0x3f, 0xbb, 0xe4, 0x72, 0xf1, 0xea,
+ 0xfd, 0x5b, 0x40, 0x11, 0xd2, 0xf5, 0x5c, 0xd3, 0xb1, 0x47, 0xa4, 0x54, 0xe4, 0xe0, 0x4d, 0xbc,
+ 0xa9, 0xc0, 0x1d, 0xcf, 0x6d, 0xdb, 0x23, 0x1e, 0xae, 0xfe, 0xcc, 0xf4, 0xc6, 0x43, 0xa7, 0xb4,
+ 0xc5, 0x31, 0x45, 0x9c, 0xf1, 0x67, 0x2c, 0xf7, 0x99, 0x8a, 0x4a, 0x15, 0xe2, 0xaa, 0x2d, 0x9c,
+ 0xa1, 0x42, 0xf5, 0x1d, 0xdc, 0x93, 0x56, 0xa6, 0xac, 0xe2, 0xe6, 0xd0, 0x1f, 0x4a, 0xc7, 0xb6,
+ 0x39, 0x18, 0xe1, 0x5d, 0xc1, 0x23, 0x8f, 0xc4, 0x53, 0x79, 0xf2, 0xa2, 0x43, 0xc8, 0xfa, 0x33,
+ 0xf3, 0x8a, 0x57, 0xde, 0x1d, 0x0e, 0xdd, 0x8e, 0x5e, 0x06, 0x1e, 0x01, 0x30, 0xef, 0xe5, 0xe1,
+ 0xba, 0xcb, 0xd5, 0x3b, 0x7a, 0xc7, 0x7f, 0x08, 0x59, 0xaa, 0xac, 0xf7, 0xb8, 0x7a, 0x37, 0x7a,
+ 0xc9, 0x78, 0x04, 0x40, 0x23, 0xeb, 0x7d, 0xae, 0xde, 0xd3, 0xdf, 0x26, 0x7e, 0x05, 0xeb, 0x57,
+ 0xc4, 0x37, 0x7d, 0x22, 0x2f, 0x34, 0x4a, 0x1c, 0xb2, 0x8f, 0xf3, 0x57, 0xac, 0x22, 0xc8, 0x2b,
+ 0x8d, 0xaf, 0x20, 0x3f, 0x1a, 0xda, 0x37, 0x6a, 0xc3, 0xee, 0x71, 0x4c, 0x09, 0x03, 0x13, 0xca,
+ 0xdd, 0x62, 0x6e, 0xda, 0x8e, 0x42, 0x1c, 0x70, 0xc4, 0x3d, 0x9c, 0xf3, 0x6d, 0x47, 0x02, 0x1e,
+ 0x42, 0x8e, 0x3a, 0x63, 0x12, 0x50, 0x6b, 0x3c, 0x29, 0x1d, 0xf2, 0x6c, 0x3f, 0xc0, 0x91, 0xa8,
+ 0xbe, 0x0e, 0xe0, 0x04, 0xa6, 0x2c, 0x14, 0xf5, 0x3c, 0xe4, 0x9c, 0xc0, 0x14, 0xb5, 0xa1, 0xbe,
+ 0x0d, 0x5b, 0x4e, 0x60, 0xc6, 0xeb, 0x81, 0x14, 0xc6, 0x73, 0xbf, 0xfe, 0x00, 0x0e, 0x1d, 0x96,
+ 0xd8, 0x8b, 0xf3, 0xbc, 0xbe, 0x09, 0x05, 0x27, 0x30, 0xa3, 0x54, 0x96, 0x85, 0x35, 0x4c, 0xdd,
+ 0xfa, 0x01, 0x94, 0x9c, 0xc0, 0x5c, 0x98, 0xab, 0xf5, 0xfb, 0x70, 0x10, 0xea, 0xe6, 0x32, 0xb2,
+ 0xfe, 0x18, 0x1e, 0xce, 0x69, 0x63, 0x59, 0x57, 0x47, 0x50, 0x4c, 0x22, 0xea, 0x25, 0xd8, 0x9b,
+ 0x9b, 0x4f, 0x78, 0xb2, 0x03, 0xc8, 0x09, 0xcc, 0x44, 0xaa, 0x48, 0x7f, 0xc3, 0xb4, 0x90, 0xa8,
+ 0x44, 0x1e, 0xd4, 0xf7, 0x61, 0x37, 0x26, 0x55, 0x31, 0x2f, 0xd7, 0x58, 0xc6, 0xa9, 0x1c, 0xc9,
+ 0x80, 0xae, 0x3f, 0x84, 0xfb, 0x91, 0x6e, 0x3e, 0x86, 0xeb, 0x05, 0xc8, 0x0b, 0x3d, 0x8f, 0x34,
+ 0xb9, 0x94, 0x51, 0x64, 0x4a, 0x3d, 0x8d, 0xeb, 0xa3, 0xd8, 0xab, 0x6f, 0xc1, 0x26, 0x5b, 0x6a,
+ 0x2d, 0xd6, 0xea, 0x45, 0xd8, 0x70, 0x02, 0x53, 0x8b, 0x2c, 0xc5, 0x1a, 0x06, 0x92, 0x7c, 0xe0,
+ 0x30, 0x4a, 0x8c, 0xbf, 0x5d, 0x85, 0xc3, 0x3b, 0x8e, 0x61, 0xf4, 0x08, 0xf2, 0xac, 0x03, 0x37,
+ 0x49, 0xd4, 0x94, 0x67, 0xee, 0x68, 0xca, 0x33, 0x61, 0x53, 0xbe, 0x07, 0x99, 0x6b, 0xc6, 0x25,
+ 0xfa, 0x94, 0x0c, 0x96, 0x23, 0xf4, 0x6b, 0xad, 0x25, 0x37, 0x25, 0x82, 0x9f, 0x30, 0x78, 0x33,
+ 0x94, 0x9f, 0x85, 0xd0, 0xb0, 0xf3, 0x56, 0xd0, 0x55, 0x01, 0x0d, 0xe5, 0x12, 0xfa, 0x1c, 0x50,
+ 0xb8, 0xb2, 0xc4, 0x56, 0x60, 0x7e, 0xb0, 0xe0, 0x62, 0xd4, 0x91, 0x47, 0xc4, 0x61, 0xe3, 0xad,
+ 0xb0, 0x6b, 0x82, 0x38, 0x94, 0x4b, 0xe8, 0xd3, 0xa8, 0x1f, 0x57, 0x48, 0x7e, 0xc6, 0xe0, 0x0d,
+ 0x25, 0x96, 0xc0, 0x67, 0x50, 0x14, 0x7a, 0xf3, 0xd5, 0x91, 0xa9, 0xf5, 0xe3, 0x19, 0xbc, 0x21,
+ 0xe4, 0xaf, 0x8e, 0xc2, 0x26, 0x79, 0x5f, 0x21, 0x8f, 0x4d, 0xea, 0x99, 0x95, 0xea, 0xb7, 0xa6,
+ 0xd6, 0x91, 0x67, 0xf0, 0xb6, 0x34, 0x10, 0x0d, 0x79, 0x57, 0x35, 0xc9, 0x25, 0x69, 0x55, 0xa9,
+ 0x9e, 0x30, 0xb3, 0xea, 0xf1, 0xb1, 0x32, 0xe3, 0x67, 0x09, 0xde, 0x11, 0xfa, 0x44, 0x4b, 0x1e,
+ 0xd9, 0x55, 0x8f, 0x5f, 0x31, 0xbb, 0xe3, 0x4a, 0xc5, 0xd4, 0xba, 0xf2, 0xd0, 0x4e, 0x35, 0xe5,
+ 0x5d, 0xd5, 0x5c, 0xdf, 0x93, 0x76, 0xc7, 0x95, 0x2a, 0x77, 0xf3, 0x65, 0xf5, 0x1b, 0x53, 0xeb,
+ 0xcb, 0x33, 0x78, 0x57, 0x00, 0xc2, 0xb6, 0x5c, 0x5a, 0x7e, 0x07, 0x07, 0xca, 0xd3, 0x97, 0xd5,
+ 0x23, 0x6e, 0x7a, 0x5c, 0x39, 0x31, 0xb5, 0xce, 0x3c, 0x83, 0xf7, 0xa4, 0xaf, 0x61, 0x63, 0x2e,
+ 0x6c, 0x8d, 0xff, 0x4e, 0xc3, 0x93, 0x4f, 0x35, 0x86, 0xb2, 0x0b, 0xaa, 0x43, 0x76, 0x3a, 0x09,
+ 0xa8, 0x4f, 0xac, 0xb1, 0x6c, 0xd9, 0xf5, 0x2b, 0xab, 0xbb, 0x18, 0x42, 0x3b, 0x74, 0x06, 0x60,
+ 0x7b, 0x3f, 0xbb, 0x92, 0x25, 0xfd, 0x45, 0x2c, 0x9a, 0x25, 0xfa, 0x9b, 0x14, 0x3c, 0xe1, 0x69,
+ 0x4e, 0x24, 0x58, 0xc4, 0x8a, 0x49, 0x24, 0xdc, 0x9c, 0x8c, 0xcd, 0x6b, 0xcf, 0x1f, 0x5b, 0x54,
+ 0x5e, 0x99, 0x9e, 0x24, 0xde, 0xc8, 0x3f, 0xfd, 0xbc, 0xe5, 0x33, 0x6e, 0x8f, 0xbf, 0xf2, 0x6e,
+ 0xc7, 0x0a, 0x88, 0xf1, 0x12, 0x32, 0xe2, 0x2f, 0x7e, 0xb9, 0xd9, 0x6a, 0xe3, 0xfe, 0x4f, 0x66,
+ 0xff, 0x87, 0xae, 0x59, 0x6f, 0xf7, 0xc5, 0x75, 0x6a, 0xaf, 0xfd, 0x63, 0xff, 0x27, 0xf3, 0xac,
+ 0x3b, 0xc0, 0x5c, 0x96, 0x32, 0x28, 0xac, 0xc9, 0xa6, 0x55, 0x6b, 0x47, 0x53, 0x5a, 0x3b, 0xca,
+ 0xd2, 0x39, 0xa0, 0x16, 0x9d, 0x06, 0xb2, 0x4b, 0x95, 0x23, 0x56, 0x1f, 0xae, 0x2d, 0x67, 0x64,
+ 0xfa, 0xc4, 0x0a, 0x3c, 0x97, 0x3f, 0x5d, 0x0e, 0x03, 0x13, 0x61, 0x2e, 0x41, 0xf7, 0xf8, 0x59,
+ 0xcc, 0xef, 0x60, 0x78, 0x9e, 0xa7, 0xd8, 0x49, 0xcc, 0xa7, 0x32, 0x88, 0x78, 0x9d, 0xd0, 0x1a,
+ 0x66, 0xb9, 0xb5, 0x9f, 0xe8, 0x98, 0x7f, 0xab, 0x51, 0xa6, 0x1f, 0x2f, 0x3f, 0xcb, 0x57, 0x91,
+ 0xb6, 0x9c, 0x8a, 0x2c, 0x9c, 0xe6, 0xef, 0xc5, 0x55, 0x68, 0x72, 0x92, 0x2f, 0x6c, 0xef, 0xb5,
+ 0x15, 0x58, 0xbe, 0x6b, 0x05, 0x56, 0xee, 0x5c, 0x81, 0xd5, 0xf8, 0x0a, 0xfc, 0x99, 0xb8, 0xea,
+ 0x74, 0xa7, 0x2c, 0x00, 0xfa, 0x33, 0x3c, 0xeb, 0x51, 0x8b, 0x86, 0x6f, 0x55, 0x5f, 0xf6, 0x46,
+ 0xfa, 0x9f, 0xcb, 0x70, 0xb8, 0x90, 0x4c, 0x3e, 0xef, 0xaf, 0x61, 0xeb, 0xca, 0x0a, 0x08, 0x3b,
+ 0x42, 0x2c, 0x5f, 0xd5, 0x32, 0xf9, 0x5e, 0xca, 0x14, 0xfd, 0x59, 0xcd, 0x0f, 0xeb, 0xa3, 0x80,
+ 0xfa, 0x33, 0xd3, 0x7a, 0xaf, 0xa0, 0xe9, 0x08, 0x8a, 0x67, 0xb5, 0xf7, 0x12, 0x5a, 0x86, 0x1d,
+ 0xc5, 0xea, 0x7a, 0x1a, 0xf1, 0xb2, 0xbc, 0x65, 0xe1, 0xc4, 0x1d, 0x2f, 0xa4, 0x56, 0x78, 0x5f,
+ 0xe0, 0xdf, 0xeb, 0x47, 0x80, 0xc4, 0x63, 0x86, 0x7f, 0x1f, 0xd6, 0xdf, 0x22, 0x99, 0xd1, 0xb8,
+ 0xd3, 0xe2, 0x25, 0xb5, 0x40, 0x66, 0x54, 0xf3, 0x59, 0x02, 0x63, 0x2e, 0x67, 0x42, 0xa0, 0xe6,
+ 0xf1, 0x73, 0xd8, 0x96, 0x8c, 0x31, 0x87, 0xc5, 0x85, 0xcc, 0x26, 0x27, 0xd5, 0xfc, 0x95, 0xe8,
+ 0xa4, 0xbb, 0xd9, 0x10, 0x1d, 0xf3, 0xf6, 0x18, 0xf6, 0xe5, 0xf1, 0x6f, 0x0e, 0xc5, 0x5b, 0x9b,
+ 0xe9, 0x13, 0xea, 0x3b, 0x44, 0xdd, 0xcd, 0xec, 0x88, 0xee, 0x56, 0xbe, 0xd2, 0x61, 0xa1, 0x43,
+ 0xdf, 0x42, 0x29, 0x69, 0xc6, 0x4e, 0x68, 0x6f, 0x1a, 0x5e, 0xd2, 0xec, 0xc6, 0xec, 0xfa, 0x52,
+ 0x69, 0xfc, 0x31, 0x03, 0x9b, 0xe7, 0x84, 0xf2, 0x8f, 0x6b, 0x2a, 0x6a, 0x7e, 0x97, 0xf8, 0x5a,
+ 0x95, 0xaf, 0x3e, 0x88, 0x17, 0x9b, 0xc4, 0x77, 0x31, 0xf6, 0xba, 0xac, 0x0c, 0xd0, 0xef, 0x60,
+ 0x6d, 0x2a, 0xbe, 0xdd, 0xc8, 0x6a, 0xf8, 0xe8, 0xf6, 0x6f, 0x3b, 0xca, 0x5a, 0x59, 0xa0, 0x1a,
+ 0xe4, 0x3d, 0x71, 0x6b, 0xcf, 0x09, 0x96, 0x17, 0x4d, 0x9e, 0xb8, 0xd6, 0x6f, 0x2d, 0x61, 0xdd,
+ 0x06, 0xb5, 0x61, 0xc3, 0x73, 0xa7, 0xda, 0x05, 0x2f, 0x0f, 0x8c, 0x45, 0x6e, 0xc4, 0xef, 0x81,
+ 0x5b, 0x4b, 0x38, 0x61, 0x88, 0x30, 0x14, 0x08, 0x7d, 0x17, 0xdd, 0x36, 0xf2, 0xb0, 0xc9, 0x57,
+ 0x7f, 0xf3, 0xf9, 0x77, 0xa1, 0xad, 0x25, 0x1c, 0xa7, 0x40, 0x7f, 0xca, 0x2f, 0x60, 0xa4, 0x9a,
+ 0x87, 0x57, 0xbe, 0x7a, 0x38, 0x47, 0x18, 0xdd, 0x08, 0xb5, 0x96, 0xb0, 0x66, 0x80, 0xea, 0x00,
+ 0x1e, 0xf7, 0x9c, 0x3f, 0xd9, 0x1a, 0x37, 0x7f, 0x3c, 0x67, 0x9e, 0xb8, 0x5c, 0x61, 0x1c, 0x91,
+ 0x15, 0xba, 0x80, 0x35, 0x56, 0x8f, 0x18, 0x41, 0x96, 0x13, 0xbc, 0xfc, 0x82, 0xa3, 0x24, 0xdc,
+ 0x32, 0x49, 0x81, 0x4e, 0x40, 0xd5, 0x22, 0x1e, 0xa0, 0xf9, 0xea, 0xfd, 0x38, 0x5b, 0xfc, 0x2a,
+ 0x83, 0x59, 0x4a, 0x38, 0x7a, 0x0d, 0xeb, 0x9e, 0x28, 0x35, 0xbc, 0xcc, 0xf0, 0x38, 0xcd, 0x57,
+ 0xbf, 0x9e, 0x7b, 0x9a, 0x45, 0x95, 0xad, 0xb5, 0x84, 0x63, 0xc6, 0xa8, 0x06, 0xe0, 0x85, 0xc7,
+ 0x00, 0x6f, 0x67, 0xe6, 0xb7, 0x7c, 0x34, 0xef, 0x8c, 0x66, 0x54, 0xcf, 0xc1, 0x9a, 0x2f, 0x14,
+ 0xc6, 0xbf, 0xe4, 0xf8, 0xf5, 0x9e, 0xcc, 0x0a, 0x59, 0xfe, 0xbe, 0x0b, 0xcb, 0xb7, 0xb8, 0x13,
+ 0x37, 0xe2, 0xf4, 0x31, 0x70, 0xb9, 0xc7, 0x91, 0x61, 0x89, 0x6f, 0x42, 0x8e, 0xf8, 0xbe, 0x28,
+ 0xe7, 0xf2, 0xf3, 0xd8, 0xd3, 0xbb, 0xcc, 0x79, 0xb7, 0x29, 0xe0, 0x38, 0xb2, 0x44, 0xbf, 0xd7,
+ 0x32, 0x53, 0x24, 0xc7, 0xc3, 0xdb, 0x32, 0x53, 0x10, 0xc5, 0x52, 0xf3, 0xf7, 0x51, 0x6a, 0xae,
+ 0xdc, 0x12, 0x39, 0x89, 0xcf, 0xae, 0x7a, 0x6e, 0xbe, 0x86, 0xf5, 0x89, 0xc8, 0x3b, 0xea, 0x12,
+ 0x3f, 0x90, 0xc9, 0xf0, 0xf5, 0x9d, 0xc9, 0xa9, 0xf1, 0xc4, 0x8c, 0xd1, 0xf7, 0x73, 0x59, 0x2a,
+ 0x52, 0xe1, 0xe9, 0x27, 0xb2, 0x54, 0x23, 0x4c, 0x66, 0xeb, 0x15, 0x6c, 0xc5, 0x52, 0x4d, 0xcb,
+ 0x90, 0xea, 0xe7, 0x67, 0xac, 0x36, 0xc1, 0x3c, 0x1d, 0x6a, 0xc6, 0xb2, 0x57, 0x64, 0xcf, 0xaf,
+ 0xee, 0xc8, 0x5e, 0x8d, 0x4d, 0xcf, 0xe2, 0xd7, 0xfc, 0xe9, 0x2f, 0x3d, 0x57, 0xad, 0x93, 0x4c,
+ 0x9d, 0xaf, 0xee, 0xc8, 0xe4, 0xd8, 0x73, 0x6b, 0xa6, 0x68, 0xc0, 0xef, 0x37, 0x43, 0x26, 0x91,
+ 0x45, 0x95, 0x2f, 0xee, 0x0e, 0x79, 0x1d, 0x8d, 0x78, 0xd0, 0x9f, 0x44, 0x79, 0x9d, 0x5f, 0x54,
+ 0x86, 0x13, 0xcd, 0x90, 0x9e, 0xd8, 0x17, 0x89, 0xc4, 0x5e, 0x9f, 0xeb, 0x8a, 0xef, 0xe8, 0x32,
+ 0xe6, 0x32, 0xbb, 0x1e, 0xcb, 0xec, 0xc2, 0xc2, 0xc0, 0x1d, 0x2d, 0x70, 0x47, 0xb3, 0x32, 0x2a,
+ 0x90, 0x11, 0x09, 0x89, 0x76, 0xa0, 0xd8, 0xeb, 0xd7, 0xfa, 0x83, 0x5e, 0xec, 0x33, 0x78, 0x06,
+ 0xd2, 0xdd, 0xd7, 0xc5, 0x14, 0xff, 0x61, 0x0b, 0xc6, 0x5d, 0x5c, 0x4c, 0x1b, 0xff, 0x90, 0x82,
+ 0xbc, 0x96, 0x85, 0xcc, 0x10, 0x37, 0x6b, 0xbd, 0x6e, 0x27, 0x66, 0xb8, 0x09, 0xf9, 0x41, 0xa7,
+ 0x37, 0xb8, 0xbc, 0xec, 0xe2, 0x3e, 0xff, 0x86, 0xbe, 0x0b, 0x5b, 0xed, 0xce, 0xdb, 0xda, 0x45,
+ 0xbb, 0x61, 0x36, 0x9a, 0x6f, 0xdb, 0xa7, 0x4d, 0xb3, 0xdd, 0x28, 0xa6, 0x75, 0x31, 0x83, 0x9a,
+ 0xfd, 0x9f, 0x2e, 0x9b, 0xc5, 0x65, 0x94, 0x87, 0xb5, 0x7e, 0xfb, 0x4d, 0xb3, 0x3b, 0xe8, 0x17,
+ 0x57, 0xd8, 0x0c, 0x0a, 0x83, 0x9b, 0xdf, 0x0b, 0xc8, 0x2a, 0x6b, 0xb6, 0xdb, 0x9d, 0x7e, 0x13,
+ 0x77, 0x6a, 0x17, 0xa6, 0xf0, 0x2d, 0x23, 0x64, 0xfa, 0x24, 0xc5, 0xb5, 0x3a, 0x40, 0xd6, 0x97,
+ 0x0f, 0x6f, 0xbc, 0x85, 0xcd, 0x5e, 0xe2, 0x4c, 0x4f, 0xfe, 0x14, 0x28, 0xf5, 0xd9, 0x3f, 0x05,
+ 0xd2, 0xca, 0xe2, 0xff, 0xa4, 0xa0, 0xd8, 0xfb, 0x92, 0xb2, 0xd8, 0xfb, 0xbf, 0x95, 0xc5, 0xde,
+ 0xe7, 0x95, 0xc5, 0x5f, 0xb2, 0xbd, 0x47, 0xbf, 0x64, 0x77, 0x0d, 0x07, 0x76, 0x7b, 0x8e, 0x7b,
+ 0x33, 0x22, 0xc9, 0x96, 0xe9, 0x00, 0xb2, 0xd4, 0xf2, 0x6f, 0x08, 0x0d, 0xbf, 0x30, 0x84, 0x63,
+ 0x74, 0x14, 0x2e, 0xa0, 0xec, 0x88, 0x0e, 0x16, 0x56, 0x7e, 0x8e, 0xc0, 0xe1, 0x5a, 0x7f, 0x0f,
+ 0x7b, 0xc9, 0xa9, 0xe4, 0x82, 0x7f, 0x1b, 0xed, 0xb4, 0xdc, 0xc6, 0xc3, 0x3b, 0x8e, 0x12, 0x1c,
+ 0x85, 0x45, 0xe8, 0x7d, 0xef, 0xff, 0xcb, 0xfb, 0xde, 0x27, 0xbd, 0xef, 0x7d, 0x99, 0xf7, 0xbd,
+ 0x5b, 0xbd, 0xaf, 0xfe, 0x53, 0x0a, 0x72, 0x4d, 0x05, 0x44, 0x18, 0xf2, 0xe7, 0x84, 0x36, 0x67,
+ 0x02, 0x8e, 0xf4, 0x82, 0xb0, 0x70, 0x87, 0x0e, 0xbe, 0xba, 0x03, 0x21, 0x5d, 0xc3, 0x90, 0xef,
+ 0xdd, 0xc9, 0xd9, 0xfb, 0x24, 0x67, 0xd2, 0xff, 0x3a, 0x86, 0x07, 0x9e, 0x7f, 0x53, 0xf6, 0x26,
+ 0xc4, 0x1d, 0x7a, 0xbe, 0x5d, 0x16, 0x3f, 0xf5, 0x8b, 0xec, 0xfe, 0xbc, 0x72, 0xe3, 0xd0, 0x77,
+ 0xd3, 0xab, 0xf2, 0xd0, 0x1b, 0xbf, 0x50, 0xa8, 0x17, 0x02, 0xf5, 0x5b, 0xf9, 0x83, 0xc0, 0x0f,
+ 0xc7, 0x2f, 0x6e, 0xbc, 0xe8, 0x17, 0x84, 0x57, 0x19, 0x2e, 0xff, 0xe6, 0x7f, 0x03, 0x00, 0x00,
+ 0xff, 0xff, 0x66, 0x2e, 0xdd, 0x3c, 0x63, 0x28, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 0000000..cbef197
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,22 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+
+gen/
+
+/example/basic/basic
+/example/grpc/client/client
+/example/grpc/server/server
+/example/http/client/client
+/example/http/server/server
+/example/jaeger/jaeger
+/example/namedtracer/namedtracer
+/example/prometheus/prometheus
+/example/zipkin/zipkin
+/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 0000000..38a1f56
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+ path = exporters/otlp/internal/opentelemetry-proto
+ url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 0000000..2ef1681
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,32 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+ issues-exit-code: 1 #Default
+ tests: true #Default
+
+linters:
+ enable:
+ - misspell
+ - goimports
+ - golint
+ - gofmt
+
+issues:
+ exclude-rules:
+ # helpers in tests often (rightfully) pass a *testing.T as their first argument
+ - path: _test\.go
+ text: "context.Context should be the first parameter of a function"
+ linters:
+ - golint
+ # Yes, they are, but it's okay in a test
+ - path: _test\.go
+ text: "exported func.*returns unexported type.*which can be annoying to use"
+ linters:
+ - golint
+
+linters-settings:
+ misspell:
+ locale: US
+ ignore-words:
+ - cancelled
+ goimports:
+ local-prefixes: go.opentelemetry.io
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
new file mode 100644
index 0000000..1b77504
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -0,0 +1,922 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+ They now are `Unset`, `Error`, and `Ok`.
+ They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/propagators`. (#1217)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+ The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+ This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add reconnecting udp connection type to Jaeger exporter.
+ This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record.
+ It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+ This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+ This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+ This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+ The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+ recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+ Style Guide to reflect this styling rule. (#1172)
+- Don't consider unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+ `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+ `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+ This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+ Rename `*Callback` with `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
+ This interface still supports the export of `SpanData`, but only as a slice.
+ Implementations are also now required to return any error from `ExportSpans` if one occurs, as well as to implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+ If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+ This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+ Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+ This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+ The functionality this method provided was limited compared to what a user can provide themselves.
+ It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+ These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+ These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+ This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+ This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+ - `value.Bool` was replaced with `kv.BoolValue`.
+ - `value.Int64` was replaced with `kv.Int64Value`.
+ - `value.Uint64` was replaced with `kv.Uint64Value`.
+ - `value.Float64` was replaced with `kv.Float64Value`.
+ - `value.Int32` was replaced with `kv.Int32Value`.
+ - `value.Uint32` was replaced with `kv.Uint32Value`.
+ - `value.Float32` was replaced with `kv.Float32Value`.
+ - `value.String` was replaced with `kv.StringValue`.
+ - `value.Int` was replaced with `kv.IntValue`.
+ - `value.Uint` was replaced with `kv.UintValue`.
+ - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+ References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+ Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add http content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+ This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+ If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+ Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+ This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+ - `"api/standard".FaaSName` -> `FaaSNameKey`
+ - `"api/standard".FaaSID` -> `FaaSIDKey`
+ - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+ - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+ The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+ If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+ This removes the behavior of changing the debug flag into a set sampling bit.
+ Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+ This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+ This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument.
+ This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+ There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+ With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+ All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+ Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. #804
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify custom http client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase instance size CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- Zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+ This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+ This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in OLTP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- The `StatusMessage` field was added to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+ Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+ The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on 3rd-party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+ The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+ This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+ The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+ This resulted in all subsequent releases not becoming the default latest.
+ A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+ Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+ Consequentially, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- Propagators extrac
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter, fixing a regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+ This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+ This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+ Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+ This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+ This was corrected. (#333)
+
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- The `core.Value` type now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the open-telemetry go library.
+It contains the API and SDK for trace and meter.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+
+
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.13.0...HEAD
+[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
+[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
+[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
+[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
+[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
+[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
+[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
+[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
+[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
+[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
+[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
+[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
+[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
+[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
+[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
+[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
+[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
+[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
+[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
+[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
+[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
+[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
new file mode 100644
index 0000000..b2e99fc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -0,0 +1,17 @@
+#####################################################
+#
+# List of approvers for this repository
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+# https://github.com/open-telemetry/community/blob/master/community-membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+# https://help.github.com/en/articles/about-code-owners
+#
+
+* @jmacd @lizthegrey @MrAlias @Aneurysm9 @evantorrie @XSAM
+
+CODEOWNERS @MrAlias @Aneurysm9
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 0000000..85d9a09
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,373 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Gitter](https://gitter.im/open-telemetry/opentelemetry-go).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```bash
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean` then it means that everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+$ go get -d go.opentelemetry.io/otel
+```
+
+(This may print a warning about "build constraints exclude all Go
+files"; just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+$ git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of a redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This puts the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+$ git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+$ make precommit
+$ git add -p
+$ git commit
+$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
+request ID to the entry you added to `CHANGELOG.md`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+ tag it as `work-in-progress`, or mark it as
+ [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure the CLA is signed and CI is passing.
+
+### How to Get PRs Merged
+
+A PR is considered to be **ready to merge** when:
+
+* It has received two approvals from Collaborators/Maintainers (at
+ different companies). This is not enforced through technical means
+ and a PR may be **ready to merge** with a single approval if the change
+ and its approach have been discussed and consensus reached.
+* Major feedback is resolved.
+* It has been open for review for at least one working day. This gives
+ people reasonable time to review.
+* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
+ one day and may be merged with a single Maintainer's approval.
+* `CHANGELOG.md` has been updated to reflect what has been
+ added, changed, removed, or fixed.
+* An urgent fix can take exception as long as it has been actively
+ communicated.
+
+Any Maintainer can merge the PR once it is **ready to merge**.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
+
+It's especially valuable to read through the [library
+guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conform to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see:
+https://github.com/open-telemetry/opentelemetry-specification/issues/165
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project, the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you plan
+to submit. This check will need to pass for your changes to be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed, the reasons why must be documented.
+
+### Configuration
+
+When creating an instantiation function for a complex `struct` it is useful
+to allow a variable number of options to be applied. However, the strong type
+system of Go restricts the function design options. There are a few ways to
+solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config` structs in the package. This `struct` must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+ // options ...
+}
+```
+
+In general the `config` `struct` will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that `config`s are not shared across package boundaries,
+meaning a `config` from one package should not be directly used by another.
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function applies any default settings and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options []Option) config {
+ // Set default values for config.
+ config := config{/* […] */}
+ for _, option := range options {
+ option.Apply(&config)
+ }
+	// Perform any validation here.
+ return config
+}
+```
+
+If validation of the `config` options is also performed, `newConfig` can return an
+error as well, which is expected to be handled by the instantiation function
+or propagated to the user.
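+
+As a rough sketch only (the `validate` helper below is an assumption for
+illustration, not an existing function in this guide), a validating variant
+could look like:
+
+```go
+// newConfig applies defaults and options, then validates the result.
+// The validate method is hypothetical and shown only for illustration.
+func newConfig(options []Option) (config, error) {
+	cfg := config{ /* defaults */ }
+	for _, option := range options {
+		option.Apply(&cfg)
+	}
+	if err := cfg.validate(); err != nil {
+		return config{}, err
+	}
+	return cfg, nil
+}
+```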
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+ Apply(*config)
+}
+```
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
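+
+For instance, a minimal sketch of the prefixed form, assuming a package that
+holds a hypothetical `tracerConfig` alongside other `config` structs:
+
+```go
+// TracerOption applies an option to the hypothetical tracerConfig.
+type TracerOption interface {
+	Apply(*tracerConfig)
+}
+```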
+
+#### Options
+
+All user configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or in the
+special case of a boolean option, `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) Apply(c *config) {
+ c.Bool = bool(o)
+}
+
+// WithOption sets a T* to have an option included.
+func WithOption() Option {
+ return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) Apply(c *config) {
+ c.Bool = bool(o)
+}
+
+// WithoutOption sets a T* to have the Bool option excluded.
+func WithoutOption() Option {
+ return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+ MyType MyType
+}
+
+func (o myTypeOption) Apply(c *config) {
+ c.MyType = o.MyType
+}
+
+// WithMyType sets T* to include MyType.
+func WithMyType(t MyType) Option {
+ return myTypeOption{t}
+}
+```
+
+#### Instantiation
+
+Use this configuration pattern to configure instantiation with a `New*`
+function.
+
+```go
+func NewT*(options ...Option) T* {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
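+
+A minimal sketch of how these pieces fit together, using a hypothetical
+`Thing` type (none of these names are prescribed by this guide):
+
+```go
+// Thing is a hypothetical type configured with the Option pattern above.
+type Thing struct {
+	name string
+	cfg  config
+}
+
+// NewThing places the required name parameter before the variadic options
+// and delegates option handling to the non-validating newConfig shown earlier.
+func NewThing(name string, options ...Option) Thing {
+	return Thing{name: name, cfg: newConfig(options)}
+}
+```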
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct`s that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example.
+
+```go
+// config holds options for all animals.
+type config struct {
+ Weight float64
+ Color string
+ MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+ ApplyDog(*config)
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+ ApplyBird(*config)
+}
+
+// Option applies options common to all animals.
+type Option interface {
+ BirdOption
+ DogOption
+}
+
+type weightOption float64
+func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) }
+func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
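+
+For illustration only, a caller could then mix the shared and type-specific
+options from the sketch above (the `Dog`, `Bird`, and `New*` names are the
+hypothetical ones used there, not real API in this repository):
+
+```go
+func example() {
+	// WithWeight returns an Option, so it is accepted by both constructors;
+	// WithFurColor and WithMaxAltitude are restricted to one type each.
+	dog := NewDog("Rex", WithWeight(20), WithFurColor("brown"))
+	bird := NewBird("Robin", WithWeight(0.05), WithMaxAltitude(1000))
+	_, _ = dog, bird
+}
+```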
+
+### Interface Type
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is to name method parameters so that they are self-documenting. Therefore,
+where appropriate, methods of every exported interface type should have
+their parameters appropriately named.
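+
+A small, purely illustrative example of the difference (the `Exporter` and
+`SpanData` names here are assumptions, and the `context` import is implied):
+
+```go
+// Exporter is a hypothetical interface. Naming the parameters (ctx, spans)
+// documents their roles; a bare Export(context.Context, []SpanData) error
+// would not.
+type Exporter interface {
+	Export(ctx context.Context, spans []SpanData) error
+}
+```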
+
+## Approvers and Maintainers
+
+Approvers:
+
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
+- [Evan Torrie](https://github.com/evantorrie), Verizon Media
+- [Josh MacDonald](https://github.com/jmacd), LightStep
+- [Sam Xie](https://github.com/XSAM)
+
+Maintainers:
+
+- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
+- [Tyler Yahn](https://github.com/MrAlias), New Relic
+
+### Become an Approver or a Maintainer
+
+See the [community membership document in OpenTelemetry community
+repo](https://github.com/open-telemetry/community/blob/master/community-membership.md).
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
new file mode 100644
index 0000000..85506a3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -0,0 +1,177 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
+TOOLS_MOD_DIR := ./internal/tools
+
+# All source code and documents. Used in spell check.
+ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
+# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
+ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort))
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+
+# Mac OS Catalina 10.15.x doesn't support 386. Hence skip the 386 test.
+SKIP_386_TEST = false
+UNAME_S := $(shell uname -s)
+ifeq ($(UNAME_S),Darwin)
+ SW_VERS := $(shell sw_vers -productVersion)
+ ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
+ SKIP_386_TEST = true
+ endif
+endif
+
+GOTEST_MIN = go test -timeout 30s
+GOTEST = $(GOTEST_MIN) -race
+GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
+
+.DEFAULT_GOAL := precommit
+
+.PHONY: precommit
+
+TOOLS_DIR := $(abspath ./.tools)
+
+$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
+ cd $(TOOLS_MOD_DIR) && \
+ go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
+
+$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
+ cd $(TOOLS_MOD_DIR) && \
+ go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
+
+$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
+ cd $(TOOLS_MOD_DIR) && \
+ go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
+
+$(TOOLS_DIR)/gojq: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
+ cd $(TOOLS_MOD_DIR) && \
+ go build -o $(TOOLS_DIR)/gojq github.com/itchyny/gojq/cmd/gojq
+
+precommit: dependabot-check license-check generate build lint examples test-benchmarks test
+
+.PHONY: test-with-coverage
+test-with-coverage:
+ set -e; \
+ printf "" > coverage.txt; \
+ for dir in $(ALL_COVERAGE_MOD_DIRS); do \
+ echo "go test ./... + coverage in $${dir}"; \
+ (cd "$${dir}" && \
+ $(GOTEST_WITH_COVERAGE) ./... && \
+ go tool cover -html=coverage.out -o coverage.html); \
+ [ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
+ done; \
+ sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
+
+
+.PHONY: ci
+ci: precommit check-clean-work-tree test-with-coverage test-386
+
+.PHONY: check-clean-work-tree
+check-clean-work-tree:
+ @if ! git diff --quiet; then \
+ echo; \
+ echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
+ echo; \
+ git status; \
+ exit 1; \
+ fi
+
+.PHONY: build
+build:
+ # TODO: Fix this on windows.
+ set -e; for dir in $(ALL_GO_MOD_DIRS); do \
+ echo "compiling all packages in $${dir}"; \
+ (cd "$${dir}" && \
+ go build ./... && \
+ go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
+ done
+
+.PHONY: test
+test:
+ set -e; for dir in $(ALL_GO_MOD_DIRS); do \
+ echo "go test ./... + race in $${dir}"; \
+ (cd "$${dir}" && \
+ $(GOTEST) ./...); \
+ done
+
+.PHONY: test-386
+test-386:
+ if [ $(SKIP_386_TEST) = true ] ; then \
+ echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
+ else \
+ set -e; for dir in $(ALL_GO_MOD_DIRS); do \
+ echo "go test ./... GOARCH 386 in $${dir}"; \
+ (cd "$${dir}" && \
+ GOARCH=386 $(GOTEST_MIN) ./...); \
+ done; \
+ fi
+
+.PHONY: examples
+examples:
+ @set -e; for ex in $(EXAMPLES); do \
+ echo "Building $${ex}"; \
+ (cd "$${ex}" && \
+ go build .); \
+ done
+
+.PHONY: test-benchmarks
+test-benchmarks:
+ @set -e; for dir in $(ALL_GO_MOD_DIRS); do \
+ echo "test benchmarks in $${dir}"; \
+ (cd "$${dir}" && go test -test.benchtime=1ms -run=NONE -bench=. ./...) > /dev/null; \
+ done
+
+.PHONY: lint
+lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
+ set -e; for dir in $(ALL_GO_MOD_DIRS); do \
+ echo "golangci-lint in $${dir}"; \
+ (cd "$${dir}" && \
+ $(TOOLS_DIR)/golangci-lint run --fix && \
+ $(TOOLS_DIR)/golangci-lint run); \
+ done
+ $(TOOLS_DIR)/misspell -w $(ALL_DOCS)
+ set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go mod tidy); \
+ done
+
+generate: $(TOOLS_DIR)/stringer
+ set -e; for dir in $(ALL_GO_MOD_DIRS); do \
+ echo "running generators in $${dir}"; \
+ (cd "$${dir}" && \
+ PATH="$(TOOLS_DIR):$${PATH}" go generate ./...); \
+ done
+
+.PHONY: license-check
+license-check:
+ @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
+ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+ done); \
+ if [ -n "$${licRes}" ]; then \
+ echo "license header checking failed:"; echo "$${licRes}"; \
+ exit 1; \
+ fi
+
+.PHONY: dependabot-check
+dependabot-check:
+ @result=$$( \
+ for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
+ do grep -q "$$f" .github/dependabot.yml \
+ || echo "$$f"; \
+ done; \
+ ); \
+ if [ -n "$$result" ]; then \
+ echo "missing go.mod dependabot check:"; echo "$$result"; \
+ exit 1; \
+ fi
diff --git a/vendor/go.opentelemetry.io/otel/Makefile.proto b/vendor/go.opentelemetry.io/otel/Makefile.proto
new file mode 100644
index 0000000..417c3b3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/Makefile.proto
@@ -0,0 +1,72 @@
+# -*- mode: makefile; -*-
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This Makefile.proto has rules to generate *.pb.go files in
+# `exporters/otlp/internal/opentelemetry-proto-gen` from the .proto files in
+# `exporters/otlp/internal/opentelemetry-proto` using protoc with a go plugin.
+#
+# The protoc binary and other tools are sourced from a docker image
+# `PROTOC_IMAGE`.
+#
+# Prereqs: The archiving utility `pax` is installed.
+
+PROTOC_IMAGE := namely/protoc-all:1.29_2
+PROTOBUF_VERSION := v1
+OTEL_PROTO_SUBMODULE := exporters/otlp/internal/opentelemetry-proto
+PROTOBUF_GEN_DIR := exporters/otlp/internal/opentelemetry-proto-gen
+PROTOBUF_TEMP_DIR := gen/pb-go
+PROTO_SOURCE_DIR := gen/proto
+SUBMODULE_PROTO_FILES := $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/*/$(PROTOBUF_VERSION)/*.proto \
+ $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/collector/*/$(PROTOBUF_VERSION)/*.proto)
+SOURCE_PROTO_FILES := $(subst $(OTEL_PROTO_SUBMODULE),$(PROTO_SOURCE_DIR),$(SUBMODULE_PROTO_FILES))
+
+default: protobuf
+
+.PHONY: protobuf protobuf-source gen-protobuf copy-protobufs
+protobuf: protobuf-source gen-protobuf copy-protobufs
+
+protobuf-source: $(SOURCE_PROTO_FILES) | $(PROTO_SOURCE_DIR)/
+
+# Changes go_package in .proto file to point to repo-local location
+define exec-replace-pkgname
+sed 's,go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go,go_package = "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen,' < $(1) > $(2)
+
+endef
+
+# replace opentelemetry-proto package name by go.opentelemetry.io/otel specific version
+$(SOURCE_PROTO_FILES): $(PROTO_SOURCE_DIR)/%.proto: $(OTEL_PROTO_SUBMODULE)/%.proto
+ @mkdir -p $(@D)
+ $(call exec-replace-pkgname,$<,$@)
+
+# Command to run protoc using docker image
+define exec-protoc-all
+docker run -v `pwd`:/defs $(PROTOC_IMAGE) $(1)
+
+endef
+
+gen-protobuf: $(SOURCE_PROTO_FILES) | $(PROTOBUF_GEN_DIR)/
+ $(foreach file,$(subst ${PROTO_SOURCE_DIR}/,,$(SOURCE_PROTO_FILES)),$(call exec-protoc-all, -i $(PROTO_SOURCE_DIR) -f ${file} -l gogo -o ${PROTOBUF_TEMP_DIR}))
+
+# requires `pax` to be installed, as it has consistent options for both BSD (Darwin) and Linux
+copy-protobufs: | $(PROTOBUF_GEN_DIR)/
+ find ./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR) -type f -print0 | \
+ pax -0 -s ',^./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR),,' -rw ./$(PROTOBUF_GEN_DIR)
+
+$(PROTO_SOURCE_DIR)/ $(PROTOBUF_GEN_DIR)/:
+ mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -rf ./gen
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
new file mode 100644
index 0000000..e45f05e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -0,0 +1,57 @@
+# OpenTelemetry-Go
+
+[![Circle CI](https://circleci.com/gh/open-telemetry/opentelemetry-go.svg?style=svg)](https://circleci.com/gh/open-telemetry/opentelemetry-go)
+[![Docs](https://godoc.org/go.opentelemetry.io/otel?status.svg)](https://pkg.go.dev/go.opentelemetry.io/otel)
+[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[![Gitter](https://badges.gitter.im/open-telemetry/opentelemetry-go.svg)](https://gitter.im/open-telemetry/opentelemetry-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
+
+## Getting Started
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides, or want
+to build your own instrumentation for your application directly, you will need
+to use the
+[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
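+
+For illustration, a minimal manual-instrumentation sketch (the tracer name
+`example.com/myapp` and the `doWork` function are placeholders) might look
+roughly like this:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/api/global"
+)
+
+func doWork(ctx context.Context) {
+	// Obtain a named Tracer from the global provider and start a span.
+	// Until an SDK is configured, this is a no-op Tracer.
+	tracer := global.Tracer("example.com/myapp")
+	ctx, span := tracer.Start(ctx, "doWork")
+	defer span.End()
+
+	_ = ctx // ... application logic using ctx ...
+}
+
+func main() {
+	doWork(context.Background())
+}
+```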
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+You can find officially supported exporters [here](./exporters/) and in the
+companion [contrib
+repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/exporters/metric).
+Additionally, there are many vendor-specific or third-party exporters for
+OpenTelemetry. These exporters are broken down by
+[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
+and
+[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
+support.
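+
+As a rough sketch (the package and function names below are placeholders, and
+`tp` is assumed to be a `trace.TracerProvider` built from a configured SDK and
+exporter), the provider is registered globally so that instrumented code picks
+it up:
+
+```go
+package telemetry
+
+import (
+	"go.opentelemetry.io/otel/api/global"
+	"go.opentelemetry.io/otel/api/trace"
+)
+
+// Install registers tp as the global provider; tracers obtained earlier from
+// the global provider will delegate to it from this point on.
+func Install(tp trace.TracerProvider) {
+	global.SetTracerProvider(tp)
+}
+```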
+
+## Project Status
+
+[Project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones)
+can be found at the respective links. We try to keep these accurate and should
+be the best place to go for answers on project status.
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 0000000..0fba7f1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,81 @@
+# Release Process
+
+## Pre-Release
+
+Update go.mod for submodules to depend on the new release, which will be created in the next step.
+
+1. Run the pre-release script. It creates a branch `pre_release_<new tag>` that will contain all release changes.
+
+ ```
+ ./pre_release.sh -t <new tag>
+ ```
+
+2. Verify the changes.
+
+ ```
+ git diff master
+ ```
+
+ This should have changed the version for all modules to be `<new tag>`.
+
+3. Update the [Changelog](./CHANGELOG.md).
+ - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+ To verify this, you can look directly at the commits since the `<last tag>`.
+
+ ```
+ git --no-pager log --pretty=oneline "<last tag>..HEAD"
+ ```
+
+ - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+ - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+ Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. Run the tag.sh script using the `<commit-hash>` of the commit on the master branch for the merged Pull Request.
+
+ ```
+ ./tag.sh <new tag> <commit-hash>
+ ```
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+ Make sure you push all sub-modules as well.
+
+ ```
+ git push upstream <new tag>
+ git push upstream <submodules-path/new tag>
+ ...
+ ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+Additionally, the `tag.sh` script generates commit logs since the last release, which can be used to supplement the release notes.
+
+## Verify Examples
+
+After releasing, verify that the examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies the examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/master/RELEASING.md) that uses this release.
diff --git a/vendor/go.opentelemetry.io/otel/api/global/doc.go b/vendor/go.opentelemetry.io/otel/api/global/doc.go
new file mode 100644
index 0000000..fce69e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package global provides global providers, propagators and more.
+package global // import "go.opentelemetry.io/otel/api/global"
diff --git a/vendor/go.opentelemetry.io/otel/api/global/handler.go b/vendor/go.opentelemetry.io/otel/api/global/handler.go
new file mode 100644
index 0000000..83f3e52
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/handler.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global
+
+import (
+ "log"
+ "os"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel"
+)
+
+var (
+ // globalErrorHandler provides an ErrorHandler that can be used
+ // throughout an OpenTelemetry instrumented project. When a
+ // user-specified ErrorHandler is registered (`SetErrorHandler`), all
+ // calls to `Handle` will be delegated to the registered ErrorHandler.
+ globalErrorHandler = &loggingErrorHandler{
+ l: log.New(os.Stderr, "", log.LstdFlags),
+ }
+
+ // delegateErrorHandlerOnce ensures that a user provided ErrorHandler is
+ // only ever registered once.
+ delegateErrorHandlerOnce sync.Once
+
+ // Compile-time check that loggingErrorHandler implements ErrorHandler.
+ _ otel.ErrorHandler = (*loggingErrorHandler)(nil)
+)
+
+// loggingErrorHandler logs all errors to STDERR.
+type loggingErrorHandler struct {
+ delegate atomic.Value
+
+ l *log.Logger
+}
+
+// setDelegate sets the ErrorHandler delegate if one is not already set.
+func (h *loggingErrorHandler) setDelegate(d otel.ErrorHandler) {
+ if h.delegate.Load() != nil {
+ // Delegate already registered
+ return
+ }
+ h.delegate.Store(d)
+}
+
+// Handle implements otel.ErrorHandler.
+func (h *loggingErrorHandler) Handle(err error) {
+ if d := h.delegate.Load(); d != nil {
+ d.(otel.ErrorHandler).Handle(err)
+ return
+ }
+ h.l.Print(err)
+}
+
+// ErrorHandler returns the global ErrorHandler instance. If no ErrorHandler
+// instance has been set (`SetErrorHandler`), the default ErrorHandler which
+// logs errors to STDERR is returned.
+func ErrorHandler() otel.ErrorHandler {
+ return globalErrorHandler
+}
+
+// SetErrorHandler sets the global ErrorHandler to be h.
+func SetErrorHandler(h otel.ErrorHandler) {
+ delegateErrorHandlerOnce.Do(func() {
+ current := ErrorHandler()
+ if current == h {
+ return
+ }
+ if internalHandler, ok := current.(*loggingErrorHandler); ok {
+ internalHandler.setDelegate(h)
+ }
+ })
+}
+
+// Handle is a convenience function for ErrorHandler().Handle(err).
+func Handle(err error) {
+ ErrorHandler().Handle(err)
+}
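+
+// Illustrative sketch (the countingHandler type below is hypothetical, not
+// defined in this package): any value with a Handle(error) method satisfies
+// otel.ErrorHandler and can be registered once from application code via
+// SetErrorHandler, after which Handle delegates to it (assumes the log and
+// sync/atomic packages are imported):
+//
+//	type countingHandler struct{ n int64 }
+//
+//	func (c *countingHandler) Handle(err error) {
+//		atomic.AddInt64(&c.n, 1)
+//		log.Print(err)
+//	}
+//
+//	func main() {
+//		global.SetErrorHandler(&countingHandler{})
+//		// ...
+//	}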
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go b/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go
new file mode 100644
index 0000000..b45f4c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go
@@ -0,0 +1,347 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "go.opentelemetry.io/otel/api/metric"
+ "go.opentelemetry.io/otel/api/metric/registry"
+ "go.opentelemetry.io/otel/label"
+)
+
+// This file contains the forwarding implementation of MeterProvider used as
+// the default global instance. Metric events using instruments provided by
+// this implementation are no-ops until the first Meter implementation is set
+// as the global provider.
+//
+// The implementation here uses Mutexes to maintain a list of active Meters in
+// the MeterProvider and Instruments in each Meter, under the assumption that
+// these interfaces are not performance-critical.
+//
+// We have the invariant that setDelegate() will be called before a new
+// MeterProvider implementation is registered as the global provider. Mutexes
+// in the MeterProvider and Meters ensure that each instrument has a delegate
+// before the global provider is set.
+//
+// Bound instrument operations are implemented by delegating to the
+// instrument after it is registered, with a sync.Once initializer to
+// protect against races with Release().
+//
+// Metric uniqueness checking is implemented by calling the exported
+// methods of the api/metric/registry package.
+
+type meterKey struct {
+ Name, Version string
+}
+
+type meterProvider struct {
+ delegate metric.MeterProvider
+
+ // lock protects `delegate` and `meters`.
+ lock sync.Mutex
+
+ // meters maintains a unique entry for every named Meter
+ // that has been registered through the global instance.
+ meters map[meterKey]*meterEntry
+}
+
+type meterImpl struct {
+ delegate unsafe.Pointer // (*metric.MeterImpl)
+
+ lock sync.Mutex
+ syncInsts []*syncImpl
+ asyncInsts []*asyncImpl
+}
+
+type meterEntry struct {
+ unique metric.MeterImpl
+ impl meterImpl
+}
+
+type instrument struct {
+ descriptor metric.Descriptor
+}
+
+type syncImpl struct {
+ delegate unsafe.Pointer // (*metric.SyncImpl)
+
+ instrument
+}
+
+type asyncImpl struct {
+ delegate unsafe.Pointer // (*metric.AsyncImpl)
+
+ instrument
+
+ runner metric.AsyncRunner
+}
+
+// SyncImpler is implemented by all of the sync metric
+// instruments.
+type SyncImpler interface {
+ SyncImpl() metric.SyncImpl
+}
+
+// AsyncImpler is implemented by all of the async
+// metric instruments.
+type AsyncImpler interface {
+ AsyncImpl() metric.AsyncImpl
+}
+
+type syncHandle struct {
+ delegate unsafe.Pointer // (*metric.HandleImpl)
+
+ inst *syncImpl
+ labels []label.KeyValue
+
+ initialize sync.Once
+}
+
+var _ metric.MeterProvider = &meterProvider{}
+var _ metric.MeterImpl = &meterImpl{}
+var _ metric.InstrumentImpl = &syncImpl{}
+var _ metric.BoundSyncImpl = &syncHandle{}
+var _ metric.AsyncImpl = &asyncImpl{}
+
+func (inst *instrument) Descriptor() metric.Descriptor {
+ return inst.descriptor
+}
+
+// MeterProvider interface and delegation
+
+func newMeterProvider() *meterProvider {
+ return &meterProvider{
+ meters: map[meterKey]*meterEntry{},
+ }
+}
+
+func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.delegate = provider
+ for key, entry := range p.meters {
+ entry.impl.setDelegate(key.Name, key.Version, provider)
+ }
+ p.meters = nil
+}
+
+func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Meter(instrumentationName, opts...)
+ }
+
+ key := meterKey{
+ Name: instrumentationName,
+ Version: metric.NewMeterConfig(opts...).InstrumentationVersion,
+ }
+ entry, ok := p.meters[key]
+ if !ok {
+ entry = &meterEntry{}
+ entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
+ p.meters[key] = entry
+
+ }
+ return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
+}
+
+// Meter interface and delegation
+
+func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ d := new(metric.MeterImpl)
+ *d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
+ m.delegate = unsafe.Pointer(d)
+
+ for _, inst := range m.syncInsts {
+ inst.setDelegate(*d)
+ }
+ m.syncInsts = nil
+ for _, obs := range m.asyncInsts {
+ obs.setDelegate(*d)
+ }
+ m.asyncInsts = nil
+}
+
+func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
+ return (*meterPtr).NewSyncInstrument(desc)
+ }
+
+ inst := &syncImpl{
+ instrument: instrument{
+ descriptor: desc,
+ },
+ }
+ m.syncInsts = append(m.syncInsts, inst)
+ return inst, nil
+}
+
+// Synchronous delegation
+
+func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
+ implPtr := new(metric.SyncImpl)
+
+ var err error
+ *implPtr, err = d.NewSyncInstrument(inst.descriptor)
+
+ if err != nil {
+ // TODO: There is no standard way to deliver this error to the user.
+ // See https://github.com/open-telemetry/opentelemetry-go/issues/514
+ // Note that the default SDK will not generate any errors yet, this is
+ // only for added safety.
+ panic(err)
+ }
+
+ atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
+}
+
+func (inst *syncImpl) Implementation() interface{} {
+ if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
+ return (*implPtr).Implementation()
+ }
+ return inst
+}
+
+func (inst *syncImpl) Bind(labels []label.KeyValue) metric.BoundSyncImpl {
+ if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
+ return (*implPtr).Bind(labels)
+ }
+ return &syncHandle{
+ inst: inst,
+ labels: labels,
+ }
+}
+
+func (bound *syncHandle) Unbind() {
+ bound.initialize.Do(func() {})
+
+ implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
+
+ if implPtr == nil {
+ return
+ }
+
+ (*implPtr).Unbind()
+}
+
+// Async delegation
+
+func (m *meterImpl) NewAsyncInstrument(
+ desc metric.Descriptor,
+ runner metric.AsyncRunner,
+) (metric.AsyncImpl, error) {
+
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
+ return (*meterPtr).NewAsyncInstrument(desc, runner)
+ }
+
+ inst := &asyncImpl{
+ instrument: instrument{
+ descriptor: desc,
+ },
+ runner: runner,
+ }
+ m.asyncInsts = append(m.asyncInsts, inst)
+ return inst, nil
+}
+
+func (obs *asyncImpl) Implementation() interface{} {
+ if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
+ return (*implPtr).Implementation()
+ }
+ return obs
+}
+
+func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
+ implPtr := new(metric.AsyncImpl)
+
+ var err error
+ *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
+
+ if err != nil {
+ // TODO: There is no standard way to deliver this error to the user.
+ // See https://github.com/open-telemetry/opentelemetry-go/issues/514
+ // Note that the default SDK will not generate any errors yet, this is
+ // only for added safety.
+ panic(err)
+ }
+
+ atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
+}
+
+// Metric updates
+
+func (m *meterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, measurements ...metric.Measurement) {
+ if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
+ (*delegatePtr).RecordBatch(ctx, labels, measurements...)
+ }
+}
+
+func (inst *syncImpl) RecordOne(ctx context.Context, number metric.Number, labels []label.KeyValue) {
+ if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
+ (*instPtr).RecordOne(ctx, number, labels)
+ }
+}
+
+// Bound instrument initialization
+
+func (bound *syncHandle) RecordOne(ctx context.Context, number metric.Number) {
+ instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
+ if instPtr == nil {
+ return
+ }
+ var implPtr *metric.BoundSyncImpl
+ bound.initialize.Do(func() {
+ implPtr = new(metric.BoundSyncImpl)
+ *implPtr = (*instPtr).Bind(bound.labels)
+ atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
+ })
+ if implPtr == nil {
+ implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
+ }
+ // This may still be nil if instrument was created and bound
+ // without a delegate, then the instrument was set to have a
+ // delegate and unbound.
+ if implPtr == nil {
+ return
+ }
+ (*implPtr).RecordOne(ctx, number)
+}
+
+func AtomicFieldOffsets() map[string]uintptr {
+ return map[string]uintptr{
+ "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
+ "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate),
+ "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate),
+ "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate),
+ "syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate),
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/state.go b/vendor/go.opentelemetry.io/otel/api/global/internal/state.go
new file mode 100644
index 0000000..ecdfd75
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/internal/state.go
@@ -0,0 +1,134 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/api/metric"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+type (
+ tracerProviderHolder struct {
+ tp trace.TracerProvider
+ }
+
+ meterProviderHolder struct {
+ mp metric.MeterProvider
+ }
+
+ propagatorsHolder struct {
+ tm otel.TextMapPropagator
+ }
+)
+
+var (
+ globalTracer = defaultTracerValue()
+ globalMeter = defaultMeterValue()
+ globalPropagators = defaultPropagatorsValue()
+
+ delegateMeterOnce sync.Once
+ delegateTraceOnce sync.Once
+)
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+ return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ delegateTraceOnce.Do(func() {
+ current := TracerProvider()
+ if current == tp {
+ // Setting the provider to the prior default is nonsense, panic.
+ // Panic is acceptable because we are likely still early in the
+ // process lifetime.
+ panic("invalid TracerProvider, the global instance cannot be reinstalled")
+ } else if def, ok := current.(*tracerProvider); ok {
+ def.setDelegate(tp)
+ }
+
+ })
+ globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+ return globalMeter.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ delegateMeterOnce.Do(func() {
+ current := MeterProvider()
+
+ if current == mp {
+ // Setting the provider to the prior default is nonsense, panic.
+ // Panic is acceptable because we are likely still early in the
+ // process lifetime.
+ panic("invalid MeterProvider, the global instance cannot be reinstalled")
+ } else if def, ok := current.(*meterProvider); ok {
+ def.setDelegate(mp)
+ }
+ })
+ globalMeter.Store(meterProviderHolder{mp: mp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() otel.TextMapPropagator {
+ return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p otel.TextMapPropagator) {
+ globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+func defaultTracerValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+ return v
+}
+
+func defaultMeterValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(meterProviderHolder{mp: newMeterProvider()})
+ return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(propagatorsHolder{tm: getDefaultTextMapPropagator()})
+ return v
+}
+
+// getDefaultTextMapPropagator returns the default TextMapPropagator, a
+// composite propagator with no registered propagators (effectively a no-op).
+func getDefaultTextMapPropagator() otel.TextMapPropagator {
+ return otel.NewCompositeTextMapPropagator()
+}
+
+// ResetForTest restores the initial global state, for testing purposes.
+func ResetForTest() {
+ globalTracer = defaultTracerValue()
+ globalMeter = defaultMeterValue()
+ globalPropagators = defaultPropagatorsValue()
+ delegateMeterOnce = sync.Once{}
+ delegateTraceOnce = sync.Once{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go b/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go
new file mode 100644
index 0000000..bcd1461
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK-defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior; it remains a
+no-op Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/api/trace"
+ "go.opentelemetry.io/otel/internal/trace/noop"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+ mtx sync.Mutex
+ tracers []*tracer
+
+ delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// Delegation only happens on the first call to this method. All subsequent
+// calls result in no delegation changes.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+ if p.delegate != nil {
+ return
+ }
+
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.delegate = provider
+ for _, t := range p.tracers {
+ t.setDelegate(provider)
+ }
+
+ p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Tracer(name, opts...)
+ }
+
+ t := &tracer{name: name, opts: opts}
+ p.tracers = append(p.tracers, t)
+ return t
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+ once sync.Once
+ name string
+ opts []trace.TracerOption
+
+ delegate trace.Tracer
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// Delegation only happens on the first call to this method. All subsequent
+// calls result in no delegation changes.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+ t.once.Do(func() { t.delegate = provider.Tracer(t.name, t.opts...) })
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set, otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) {
+ if t.delegate != nil {
+ return t.delegate.Start(ctx, name, opts...)
+ }
+ return noop.Tracer.Start(ctx, name, opts...)
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/metric.go b/vendor/go.opentelemetry.io/otel/api/global/metric.go
new file mode 100644
index 0000000..f1695bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/metric.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global
+
+import (
+ "go.opentelemetry.io/otel/api/global/internal"
+ "go.opentelemetry.io/otel/api/metric"
+)
+
+// Meter creates an implementation of the Meter interface from the global
+// MeterProvider. The instrumentationName must be the name of the library
+// providing instrumentation. This name may be the same as the instrumented
+// code only if that code provides built-in instrumentation. If the
+// instrumentationName is empty, then an implementation-defined default name
+// will be used instead.
+//
+// This is short for MeterProvider().Meter(name)
+func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
+ return MeterProvider().Meter(instrumentationName, opts...)
+}
+
+// MeterProvider returns the registered global meter provider. If
+// none is registered then a default meter provider is returned that
+// forwards the Meter interface to the first registered Meter.
+//
+// Use the meter provider to create a named meter. E.g.
+// meter := global.MeterProvider().Meter("example.com/foo")
+// or
+// meter := global.Meter("example.com/foo")
+func MeterProvider() metric.MeterProvider {
+ return internal.MeterProvider()
+}
+
+// SetMeterProvider registers `mp` as the global meter provider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ internal.SetMeterProvider(mp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/propagation.go b/vendor/go.opentelemetry.io/otel/api/global/propagation.go
new file mode 100644
index 0000000..c5f4c2b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global
+
+import (
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/api/global/internal"
+)
+
+// TextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func TextMapPropagator() otel.TextMapPropagator {
+ return internal.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator otel.TextMapPropagator) {
+ internal.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/trace.go b/vendor/go.opentelemetry.io/otel/api/global/trace.go
new file mode 100644
index 0000000..49a7543
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/global/trace.go
@@ -0,0 +1,44 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global
+
+import (
+ "go.opentelemetry.io/otel/api/global/internal"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+// Tracer creates a named tracer that implements Tracer interface.
+// If the name is an empty string, then the provider uses a default name.
+//
+// This is short for TracerProvider().Tracer(name)
+func Tracer(name string) trace.Tracer {
+ return TracerProvider().Tracer(name)
+}
+
+// TracerProvider returns the registered global trace provider.
+// If none is registered then an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+// tracer := global.TracerProvider().Tracer("example.com/foo")
+// or
+// tracer := global.Tracer("example.com/foo")
+func TracerProvider() trace.TracerProvider {
+ return internal.TracerProvider()
+}
+
+// SetTracerProvider registers `tp` as the global trace provider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ internal.SetTracerProvider(tp)
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/async.go b/vendor/go.opentelemetry.io/otel/api/metric/async.go
new file mode 100644
index 0000000..d0d488d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/async.go
@@ -0,0 +1,217 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// The file is organized as follows:
+//
+// - Observation type
+// - Three kinds of Observer callback (int64, float64, batch)
+// - Three kinds of Observer result (int64, float64, batch)
+// - Three kinds of Observe() function (int64, float64, batch)
+// - Three kinds of AsyncRunner interface (abstract, single, batch)
+// - Two kinds of Observer constructor (int64, float64)
+// - Two kinds of Observation() function (int64, float64)
+// - Various internals
+
+// Observation is used for reporting an asynchronous batch of metric
+// values. Instances of this type should be created by asynchronous
+// instruments (e.g., Int64ValueObserver.Observation()).
+type Observation struct {
+ // number needs to be aligned for 64-bit atomic operations.
+ number Number
+ instrument AsyncImpl
+}
+
+// Int64ObserverFunc is a type of callback that integral
+// observers run.
+type Int64ObserverFunc func(context.Context, Int64ObserverResult)
+
+// Float64ObserverFunc is a type of callback that floating point
+// observers run.
+type Float64ObserverFunc func(context.Context, Float64ObserverResult)
+
+// BatchObserverFunc is a callback argument for use with any
+// Observer instrument that will be reported as a batch of
+// observations.
+type BatchObserverFunc func(context.Context, BatchObserverResult)
+
+// Int64ObserverResult is passed to an observer callback to capture
+// observations for one asynchronous integer metric instrument.
+type Int64ObserverResult struct {
+ instrument AsyncImpl
+ function func([]label.KeyValue, ...Observation)
+}
+
+// Float64ObserverResult is passed to an observer callback to capture
+// observations for one asynchronous floating point metric instrument.
+type Float64ObserverResult struct {
+ instrument AsyncImpl
+ function func([]label.KeyValue, ...Observation)
+}
+
+// BatchObserverResult is passed to a batch observer callback to
+// capture observations for multiple asynchronous instruments.
+type BatchObserverResult struct {
+ function func([]label.KeyValue, ...Observation)
+}
+
+// Observe captures a single integer value from the associated
+// instrument callback, with the given labels.
+func (ir Int64ObserverResult) Observe(value int64, labels ...label.KeyValue) {
+ ir.function(labels, Observation{
+ instrument: ir.instrument,
+ number: NewInt64Number(value),
+ })
+}
+
+// Observe captures a single floating point value from the associated
+// instrument callback, with the given labels.
+func (fr Float64ObserverResult) Observe(value float64, labels ...label.KeyValue) {
+ fr.function(labels, Observation{
+ instrument: fr.instrument,
+ number: NewFloat64Number(value),
+ })
+}
+
+// Observe captures multiple observations from the associated batch
+// instrument callback, with the given labels.
+func (br BatchObserverResult) Observe(labels []label.KeyValue, obs ...Observation) {
+ br.function(labels, obs...)
+}
+
+// AsyncRunner is expected to convert into an AsyncSingleRunner or an
+// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
+// does not satisfy one of these interfaces.
+type AsyncRunner interface {
+ // AnyRunner() is a marker method with no functional use
+ // other than to make this a non-empty interface.
+ AnyRunner()
+}
+
+// AsyncSingleRunner is an interface implemented by single-observer
+// callbacks.
+type AsyncSingleRunner interface {
+ // Run accepts a single instrument and function for capturing
+ // observations of that instrument. Each call to the function
+ // receives one captured observation. (The function accepts
+ // multiple observations so the same implementation can be
+ // used for batch runners.)
+ Run(ctx context.Context, single AsyncImpl, capture func([]label.KeyValue, ...Observation))
+
+ AsyncRunner
+}
+
+// AsyncBatchRunner is an interface implemented by batch-observer
+// callbacks.
+type AsyncBatchRunner interface {
+ // Run accepts a function for capturing observations of
+ // multiple instruments.
+ Run(ctx context.Context, capture func([]label.KeyValue, ...Observation))
+
+ AsyncRunner
+}
+
+var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil)
+var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil)
+var _ AsyncBatchRunner = (*BatchObserverFunc)(nil)
+
+// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments.
+func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner {
+ return &c
+}
+
+// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments.
+func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner {
+ return &c
+}
+
+// newBatchAsyncRunner returns a batch-observer callback for use with multiple Observer instruments.
+func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner {
+ return &c
+}
+
+// AnyRunner implements AsyncRunner.
+func (*Int64ObserverFunc) AnyRunner() {}
+
+// AnyRunner implements AsyncRunner.
+func (*Float64ObserverFunc) AnyRunner() {}
+
+// AnyRunner implements AsyncRunner.
+func (*BatchObserverFunc) AnyRunner() {}
+
+// Run implements AsyncSingleRunner.
+func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
+ (*i)(ctx, Int64ObserverResult{
+ instrument: impl,
+ function: function,
+ })
+}
+
+// Run implements AsyncSingleRunner.
+func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
+ (*f)(ctx, Float64ObserverResult{
+ instrument: impl,
+ function: function,
+ })
+}
+
+// Run implements AsyncBatchRunner.
+func (b *BatchObserverFunc) Run(ctx context.Context, function func([]label.KeyValue, ...Observation)) {
+ (*b)(ctx, BatchObserverResult{
+ function: function,
+ })
+}
+
+// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver.
+func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) {
+ common, err := checkNewAsync(asyncInst, err)
+ return Int64ValueObserver{asyncInstrument: common}, err
+}
+
+// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver.
+func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) {
+ common, err := checkNewAsync(asyncInst, err)
+ return Float64ValueObserver{asyncInstrument: common}, err
+}
+
+// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver.
+func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) {
+ common, err := checkNewAsync(asyncInst, err)
+ return Int64SumObserver{asyncInstrument: common}, err
+}
+
+// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver.
+func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) {
+ common, err := checkNewAsync(asyncInst, err)
+ return Float64SumObserver{asyncInstrument: common}, err
+}
+
+// wrapInt64UpDownSumObserverInstrument converts an AsyncImpl into Int64UpDownSumObserver.
+func wrapInt64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownSumObserver, error) {
+ common, err := checkNewAsync(asyncInst, err)
+ return Int64UpDownSumObserver{asyncInstrument: common}, err
+}
+
+// wrapFloat64UpDownSumObserverInstrument converts an AsyncImpl into Float64UpDownSumObserver.
+func wrapFloat64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownSumObserver, error) {
+ common, err := checkNewAsync(asyncInst, err)
+ return Float64UpDownSumObserver{asyncInstrument: common}, err
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/config.go b/vendor/go.opentelemetry.io/otel/api/metric/config.go
new file mode 100644
index 0000000..3cd8fe8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/config.go
@@ -0,0 +1,125 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import "go.opentelemetry.io/otel/unit"
+
+// InstrumentConfig contains options for instrument descriptors.
+type InstrumentConfig struct {
+ // Description describes the instrument in human-readable terms.
+ Description string
+ // Unit describes the measurement unit for an instrument.
+ Unit unit.Unit
+ // InstrumentationName is the name of the library providing
+ // instrumentation.
+ InstrumentationName string
+ // InstrumentationVersion is the version of the library providing
+ // instrumentation.
+ InstrumentationVersion string
+}
+
+// InstrumentOption is an interface for applying instrument options.
+type InstrumentOption interface {
+ // ApplyInstrument is used to set an InstrumentOption value of an
+ // InstrumentConfig.
+ ApplyInstrument(*InstrumentConfig)
+}
+
+// NewInstrumentConfig creates a new InstrumentConfig
+// and applies all the given options.
+func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig {
+ var config InstrumentConfig
+ for _, o := range opts {
+ o.ApplyInstrument(&config)
+ }
+ return config
+}
+
+// WithDescription applies provided description.
+func WithDescription(desc string) InstrumentOption {
+ return descriptionOption(desc)
+}
+
+type descriptionOption string
+
+func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) {
+ config.Description = string(d)
+}
+
+// WithUnit applies provided unit.
+func WithUnit(unit unit.Unit) InstrumentOption {
+ return unitOption(unit)
+}
+
+type unitOption unit.Unit
+
+func (u unitOption) ApplyInstrument(config *InstrumentConfig) {
+ config.Unit = unit.Unit(u)
+}
+
+// WithInstrumentationName sets the instrumentation name.
+func WithInstrumentationName(name string) InstrumentOption {
+ return instrumentationNameOption(name)
+}
+
+type instrumentationNameOption string
+
+func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) {
+ config.InstrumentationName = string(i)
+}
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+ // InstrumentationVersion is the version of the library providing
+ // instrumentation.
+ InstrumentationVersion string
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+ // ApplyMeter is used to set a MeterOption value of a MeterConfig.
+ ApplyMeter(*MeterConfig)
+}
+
+// NewMeterConfig creates a new MeterConfig and applies
+// all the given options.
+func NewMeterConfig(opts ...MeterOption) MeterConfig {
+ var config MeterConfig
+ for _, o := range opts {
+ o.ApplyMeter(&config)
+ }
+ return config
+}
+
+// Option is an interface for applying Instrument or Meter options.
+type Option interface {
+ InstrumentOption
+ MeterOption
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) Option {
+ return instrumentationVersionOption(version)
+}
+
+type instrumentationVersionOption string
+
+func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) {
+ config.InstrumentationVersion = string(i)
+}
+
+func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) {
+ config.InstrumentationVersion = string(i)
+}
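+
+// Illustrative sketch (the option values below are hypothetical): instrument
+// options compose when building a config, e.g.
+//
+//	cfg := NewInstrumentConfig(
+//		WithDescription("request latency"),
+//		WithUnit(unit.Milliseconds),
+//		WithInstrumentationName("example.com/httpmw"),
+//	)
+//	// cfg.Description == "request latency", cfg.Unit == unit.Milliseconds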
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/counter.go b/vendor/go.opentelemetry.io/otel/api/metric/counter.go
new file mode 100644
index 0000000..c03421d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/counter.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// Float64Counter is a metric that accumulates float64 values.
+type Float64Counter struct {
+ syncInstrument
+}
+
+// Int64Counter is a metric that accumulates int64 values.
+type Int64Counter struct {
+ syncInstrument
+}
+
+// BoundFloat64Counter is a bound instrument for Float64Counter.
+//
+// It inherits the Unbind function from syncBoundInstrument.
+type BoundFloat64Counter struct {
+ syncBoundInstrument
+}
+
+// BoundInt64Counter is a bound instrument for Int64Counter.
+//
+// It inherits the Unbind function from syncBoundInstrument.
+type BoundInt64Counter struct {
+ syncBoundInstrument
+}
+
+// Bind creates a bound instrument for this counter. The labels are
+// associated with values recorded via subsequent calls to Record.
+func (c Float64Counter) Bind(labels ...label.KeyValue) (h BoundFloat64Counter) {
+ h.syncBoundInstrument = c.bind(labels)
+ return
+}
+
+// Bind creates a bound instrument for this counter. The labels are
+// associated with values recorded via subsequent calls to Record.
+func (c Int64Counter) Bind(labels ...label.KeyValue) (h BoundInt64Counter) {
+ h.syncBoundInstrument = c.bind(labels)
+ return
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Float64Counter) Measurement(value float64) Measurement {
+ return c.float64Measurement(value)
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Int64Counter) Measurement(value int64) Measurement {
+ return c.int64Measurement(value)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Float64Counter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
+ c.directRecord(ctx, NewFloat64Number(value), labels)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Int64Counter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
+ c.directRecord(ctx, NewInt64Number(value), labels)
+}
+
+// Add adds the value to the counter's sum using the labels
+// previously bound to this counter via Bind()
+func (b BoundFloat64Counter) Add(ctx context.Context, value float64) {
+ b.directRecord(ctx, NewFloat64Number(value))
+}
+
+// Add adds the value to the counter's sum using the labels
+// previously bound to this counter via Bind()
+func (b BoundInt64Counter) Add(ctx context.Context, value int64) {
+ b.directRecord(ctx, NewInt64Number(value))
+}
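+
+// Illustrative sketch (the meter, instrument name, labels, and ctx are
+// hypothetical): counters are created from a Meter and can be used directly
+// or through a bound instrument:
+//
+//	requests, _ := meter.NewInt64Counter("example.com/requests")
+//	requests.Add(ctx, 1, label.String("path", "/users"))
+//
+//	bound := requests.Bind(label.String("path", "/users"))
+//	defer bound.Unbind()
+//	bound.Add(ctx, 1)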
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go b/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go
new file mode 100644
index 0000000..3af55e5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import "go.opentelemetry.io/otel/unit"
+
+// Descriptor contains all the settings that describe an instrument,
+// including its name, metric kind, number kind, and the configurable
+// options.
+type Descriptor struct {
+ name string
+ kind Kind
+ numberKind NumberKind
+ config InstrumentConfig
+}
+
+// NewDescriptor returns a Descriptor with the given contents.
+func NewDescriptor(name string, mkind Kind, nkind NumberKind, opts ...InstrumentOption) Descriptor {
+ return Descriptor{
+ name: name,
+ kind: mkind,
+ numberKind: nkind,
+ config: NewInstrumentConfig(opts...),
+ }
+}
+
+// Name returns the metric instrument's name.
+func (d Descriptor) Name() string {
+ return d.name
+}
+
+// MetricKind returns the specific kind of instrument.
+func (d Descriptor) MetricKind() Kind {
+ return d.kind
+}
+
+// Description provides a human-readable description of the metric
+// instrument.
+func (d Descriptor) Description() string {
+ return d.config.Description
+}
+
+// Unit describes the units of the metric instrument. Unitless
+// metrics return the empty string.
+func (d Descriptor) Unit() unit.Unit {
+ return d.config.Unit
+}
+
+// NumberKind returns whether this instrument is declared over int64,
+// float64, or uint64 values.
+func (d Descriptor) NumberKind() NumberKind {
+ return d.numberKind
+}
+
+// InstrumentationName returns the name of the library that provided
+// instrumentation for this instrument.
+func (d Descriptor) InstrumentationName() string {
+ return d.config.InstrumentationName
+}
+
+// InstrumentationVersion returns the version of the library that provided
+// instrumentation for this instrument.
+func (d Descriptor) InstrumentationVersion() string {
+ return d.config.InstrumentationVersion
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/doc.go b/vendor/go.opentelemetry.io/otel/api/metric/doc.go
new file mode 100644
index 0000000..48a59c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/doc.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metric provides support for reporting measurements using instruments.
+//
+// Instruments are categorized as below:
+//
+// Synchronous instruments are called by the user with a Context.
+// Asynchronous instruments are called by the SDK during collection.
+//
+// Additive instruments are semantically intended for capturing a sum.
+// Non-additive instruments are intended for capturing a distribution.
+//
+// Additive instruments may be monotonic, in which case they are
+// non-decreasing and naturally define a rate.
+//
+// The synchronous instrument names are:
+//
+// Counter: additive, monotonic
+// UpDownCounter: additive
+// ValueRecorder: non-additive
+//
+// and the asynchronous instruments are:
+//
+// SumObserver: additive, monotonic
+// UpDownSumObserver: additive
+// ValueObserver: non-additive
+//
+// All instruments are provided with support for either float64 or
+// int64 input values.
+//
+// The Meter interface supports allocating new instruments as well as
+// interfaces for recording batches of synchronous measurements or
+// asynchronous observations. To obtain a Meter, use a MeterProvider.
+//
+// The MeterProvider interface supports obtaining a named Meter interface. To
+// obtain a MeterProvider implementation, initialize and configure any
+// compatible SDK.
+package metric // import "go.opentelemetry.io/otel/api/metric"
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/kind.go b/vendor/go.opentelemetry.io/otel/api/metric/kind.go
new file mode 100644
index 0000000..9d4b453
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/kind.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate stringer -type=Kind
+
+package metric
+
+// Kind describes the kind of instrument.
+type Kind int8
+
+const (
+ // ValueRecorderKind indicates a ValueRecorder instrument.
+ ValueRecorderKind Kind = iota
+ // ValueObserverKind indicates a ValueObserver instrument.
+ ValueObserverKind
+
+ // CounterKind indicates a Counter instrument.
+ CounterKind
+ // UpDownCounterKind indicates an UpDownCounter instrument.
+ UpDownCounterKind
+
+ // SumObserverKind indicates a SumObserver instrument.
+ SumObserverKind
+ // UpDownSumObserverKind indicates an UpDownSumObserver instrument.
+ UpDownSumObserverKind
+)
+
+// Synchronous returns whether this is a synchronous kind of instrument.
+func (k Kind) Synchronous() bool {
+ switch k {
+ case CounterKind, UpDownCounterKind, ValueRecorderKind:
+ return true
+ }
+ return false
+}
+
+// Asynchronous returns whether this is an asynchronous kind of instrument.
+func (k Kind) Asynchronous() bool {
+ return !k.Synchronous()
+}
+
+// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
+func (k Kind) Adding() bool {
+ switch k {
+ case CounterKind, UpDownCounterKind, SumObserverKind, UpDownSumObserverKind:
+ return true
+ }
+ return false
+}
+
+// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
+func (k Kind) Grouping() bool {
+ return !k.Adding()
+}
+
+// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
+func (k Kind) Monotonic() bool {
+ switch k {
+ case CounterKind, SumObserverKind:
+ return true
+ }
+ return false
+}
+
+// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
+func (k Kind) PrecomputedSum() bool {
+ return k.Adding() && k.Asynchronous()
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go b/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go
new file mode 100644
index 0000000..eb1a0d5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=Kind"; DO NOT EDIT.
+
+package metric
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ValueRecorderKind-0]
+ _ = x[ValueObserverKind-1]
+ _ = x[CounterKind-2]
+ _ = x[UpDownCounterKind-3]
+ _ = x[SumObserverKind-4]
+ _ = x[UpDownSumObserverKind-5]
+}
+
+const _Kind_name = "ValueRecorderKindValueObserverKindCounterKindUpDownCounterKindSumObserverKindUpDownSumObserverKind"
+
+var _Kind_index = [...]uint8{0, 17, 34, 45, 62, 77, 98}
+
+func (i Kind) String() string {
+ if i < 0 || i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/meter.go b/vendor/go.opentelemetry.io/otel/api/metric/meter.go
new file mode 100644
index 0000000..d174913
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/meter.go
@@ -0,0 +1,320 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// The file is organized as follows:
+//
+// - MeterProvider interface
+// - Meter struct
+// - RecordBatch
+// - BatchObserver
+// - Synchronous instrument constructors (2 x int64,float64)
+// - Asynchronous instrument constructors (1 x int64,float64)
+// - Batch asynchronous constructors (1 x int64,float64)
+// - Internals
+
+// MeterProvider supports named Meter instances.
+type MeterProvider interface {
+ // Meter creates an implementation of the Meter interface.
+ // The instrumentationName must be the name of the library providing
+ // instrumentation. This name may be the same as the instrumented code
+ // only if that code provides built-in instrumentation. If the
+ // instrumentationName is empty, then an implementation-defined default
+ // name will be used instead.
+ Meter(instrumentationName string, opts ...MeterOption) Meter
+}
+
+// Meter is the OpenTelemetry metric API, based on a `MeterImpl`
+// implementation and the `Meter` library name.
+//
+// An uninitialized Meter is a no-op implementation.
+type Meter struct {
+ impl MeterImpl
+ name, version string
+}
+
+// RecordBatch atomically records a batch of measurements.
+func (m Meter) RecordBatch(ctx context.Context, ls []label.KeyValue, ms ...Measurement) {
+ if m.impl == nil {
+ return
+ }
+ m.impl.RecordBatch(ctx, ls, ms...)
+}
+
+// NewBatchObserver creates a new BatchObserver that supports
+// making batches of observations for multiple instruments.
+func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver {
+ return BatchObserver{
+ meter: m,
+ runner: newBatchAsyncRunner(callback),
+ }
+}
+
+// NewInt64Counter creates a new integer Counter instrument with the
+// given name, customized with options. May return an error if the
+// name is invalid (e.g., empty) or improperly registered (e.g.,
+// duplicate registration).
+func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) {
+ return wrapInt64CounterInstrument(
+ m.newSync(name, CounterKind, Int64NumberKind, options))
+}
+
+// NewFloat64Counter creates a new floating point Counter with the
+// given name, customized with options. May return an error if the
+// name is invalid (e.g., empty) or improperly registered (e.g.,
+// duplicate registration).
+func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) {
+ return wrapFloat64CounterInstrument(
+ m.newSync(name, CounterKind, Float64NumberKind, options))
+}
+
+// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the
+// given name, customized with options. May return an error if the
+// name is invalid (e.g., empty) or improperly registered (e.g.,
+// duplicate registration).
+func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) {
+ return wrapInt64UpDownCounterInstrument(
+ m.newSync(name, UpDownCounterKind, Int64NumberKind, options))
+}
+
+// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the
+// given name, customized with options. May return an error if the
+// name is invalid (e.g., empty) or improperly registered (e.g.,
+// duplicate registration).
+func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) {
+ return wrapFloat64UpDownCounterInstrument(
+ m.newSync(name, UpDownCounterKind, Float64NumberKind, options))
+}
+
+// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
+// given name, customized with options. May return an error if the
+// name is invalid (e.g., empty) or improperly registered (e.g.,
+// duplicate registration).
+func (m Meter) NewInt64ValueRecorder(name string, opts ...InstrumentOption) (Int64ValueRecorder, error) {
+ return wrapInt64ValueRecorderInstrument(
+ m.newSync(name, ValueRecorderKind, Int64NumberKind, opts))
+}
+
+// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
+// given name, customized with options. May return an error if the
+// name is invalid (e.g., empty) or improperly registered (e.g.,
+// duplicate registration).
+func (m Meter) NewFloat64ValueRecorder(name string, opts ...InstrumentOption) (Float64ValueRecorder, error) {
+ return wrapFloat64ValueRecorderInstrument(
+ m.newSync(name, ValueRecorderKind, Float64NumberKind, opts))
+}
+
+// NewInt64ValueObserver creates a new integer ValueObserver instrument
+// with the given name, running a given callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (m Meter) NewInt64ValueObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64ValueObserver, error) {
+ if callback == nil {
+ return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapInt64ValueObserverInstrument(
+ m.newAsync(name, ValueObserverKind, Int64NumberKind, opts,
+ newInt64AsyncRunner(callback)))
+}
+
+// NewFloat64ValueObserver creates a new floating point ValueObserver with
+// the given name, running a given callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (m Meter) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64ValueObserver, error) {
+ if callback == nil {
+ return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapFloat64ValueObserverInstrument(
+ m.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
+ newFloat64AsyncRunner(callback)))
+}
+
+// NewInt64SumObserver creates a new integer SumObserver instrument
+// with the given name, running a given callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) {
+ if callback == nil {
+ return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapInt64SumObserverInstrument(
+ m.newAsync(name, SumObserverKind, Int64NumberKind, opts,
+ newInt64AsyncRunner(callback)))
+}
+
+// NewFloat64SumObserver creates a new floating point SumObserver with
+// the given name, running a given callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (m Meter) NewFloat64SumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64SumObserver, error) {
+ if callback == nil {
+ return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapFloat64SumObserverInstrument(
+ m.newAsync(name, SumObserverKind, Float64NumberKind, opts,
+ newFloat64AsyncRunner(callback)))
+}
+
+// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
+// with the given name, running a given callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
+ if callback == nil {
+ return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapInt64UpDownSumObserverInstrument(
+ m.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts,
+ newInt64AsyncRunner(callback)))
+}
+
+// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
+// the given name, running a given callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (m Meter) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
+ if callback == nil {
+ return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapFloat64UpDownSumObserverInstrument(
+ m.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
+ newFloat64AsyncRunner(callback)))
+}
+
+// NewInt64ValueObserver creates a new integer ValueObserver instrument
+// with the given name, running in a batch callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) {
+ if b.runner == nil {
+ return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapInt64ValueObserverInstrument(
+ b.meter.newAsync(name, ValueObserverKind, Int64NumberKind, opts, b.runner))
+}
+
+// NewFloat64ValueObserver creates a new floating point ValueObserver with
+// the given name, running in a batch callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (b BatchObserver) NewFloat64ValueObserver(name string, opts ...InstrumentOption) (Float64ValueObserver, error) {
+ if b.runner == nil {
+ return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapFloat64ValueObserverInstrument(
+ b.meter.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
+ b.runner))
+}
+
+// NewInt64SumObserver creates a new integer SumObserver instrument
+// with the given name, running in a batch callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) {
+ if b.runner == nil {
+ return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapInt64SumObserverInstrument(
+ b.meter.newAsync(name, SumObserverKind, Int64NumberKind, opts, b.runner))
+}
+
+// NewFloat64SumObserver creates a new floating point SumObserver with
+// the given name, running in a batch callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) {
+ if b.runner == nil {
+ return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapFloat64SumObserverInstrument(
+ b.meter.newAsync(name, SumObserverKind, Float64NumberKind, opts,
+ b.runner))
+}
+
+// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
+// with the given name, running in a batch callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
+ if b.runner == nil {
+ return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapInt64UpDownSumObserverInstrument(
+ b.meter.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts, b.runner))
+}
+
+// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
+// the given name, running in a batch callback, and customized with
+// options. May return an error if the name is invalid (e.g., empty)
+// or improperly registered (e.g., duplicate registration).
+func (b BatchObserver) NewFloat64UpDownSumObserver(name string, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
+ if b.runner == nil {
+ return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
+ }
+ return wrapFloat64UpDownSumObserverInstrument(
+ b.meter.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
+ b.runner))
+}
+
+// MeterImpl returns the underlying MeterImpl of this Meter.
+func (m Meter) MeterImpl() MeterImpl {
+ return m.impl
+}
+
+// newAsync constructs one new asynchronous instrument.
+func (m Meter) newAsync(
+ name string,
+ mkind Kind,
+ nkind NumberKind,
+ opts []InstrumentOption,
+ runner AsyncRunner,
+) (
+ AsyncImpl,
+ error,
+) {
+ if m.impl == nil {
+ return NoopAsync{}, nil
+ }
+ desc := NewDescriptor(name, mkind, nkind, opts...)
+ desc.config.InstrumentationName = m.name
+ desc.config.InstrumentationVersion = m.version
+ return m.impl.NewAsyncInstrument(desc, runner)
+}
+
+// newSync constructs one new synchronous instrument.
+func (m Meter) newSync(
+ name string,
+ metricKind Kind,
+ numberKind NumberKind,
+ opts []InstrumentOption,
+) (
+ SyncImpl,
+ error,
+) {
+ if m.impl == nil {
+ return NoopSync{}, nil
+ }
+ desc := NewDescriptor(name, metricKind, numberKind, opts...)
+ desc.config.InstrumentationName = m.name
+ desc.config.InstrumentationVersion = m.version
+ return m.impl.NewSyncInstrument(desc)
+}
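
A hypothetical usage sketch for the constructors above. It relies on types visible in this vendor drop (NoopMeterProvider from noop.go, RecordBatch from this file); the Measurement helper on the returned Int64Counter is assumed to exist elsewhere in the package, as suggested by wrapInt64CounterInstrument in sync.go, and the instrumentation name is illustrative only:

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

func main() {
	// A zero-value Meter (impl == nil) is a safe no-op; here a no-op
	// provider from noop.go hands one out under an instrumentation name.
	meter := metric.NoopMeterProvider{}.Meter("github.com/opencord/voltha-go/example")

	requests, err := meter.NewInt64Counter("requests.total")
	if err != nil {
		panic(err)
	}

	// RecordBatch records several measurements against one label set.
	meter.RecordBatch(context.Background(),
		[]label.KeyValue{label.String("handler", "flows")},
		requests.Measurement(1))
}
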
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/must.go b/vendor/go.opentelemetry.io/otel/api/metric/must.go
new file mode 100644
index 0000000..c88e050
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/must.go
@@ -0,0 +1,222 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// MeterMust is a wrapper for Meter interfaces that panics when any
+// instrument constructor encounters an error.
+type MeterMust struct {
+ meter Meter
+}
+
+// BatchObserverMust is a wrapper for BatchObserver that panics when
+// any instrument constructor encounters an error.
+type BatchObserverMust struct {
+ batch BatchObserver
+}
+
+// Must constructs a MeterMust implementation from a Meter, allowing
+// the application to panic when any instrument constructor yields an
+// error.
+func Must(meter Meter) MeterMust {
+ return MeterMust{meter: meter}
+}
+
+// NewInt64Counter calls `Meter.NewInt64Counter` and returns the
+// instrument, panicking if it encounters an error.
+func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter {
+ if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the
+// instrument, panicking if it encounters an error.
+func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter {
+ if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the
+// instrument, panicking if it encounters an error.
+func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter {
+ if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the
+// instrument, panicking if it encounters an error.
+func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter {
+ if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
+// instrument, panicking if it encounters an error.
+func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...InstrumentOption) Int64ValueRecorder {
+ if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
+// instrument, panicking if it encounters an error.
+func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...InstrumentOption) Float64ValueRecorder {
+ if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64ValueObserver calls `Meter.NewInt64ValueObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (mm MeterMust) NewInt64ValueObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64ValueObserver {
+ if inst, err := mm.meter.NewInt64ValueObserver(name, callback, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64ValueObserver calls `Meter.NewFloat64ValueObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (mm MeterMust) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64ValueObserver {
+ if inst, err := mm.meter.NewFloat64ValueObserver(name, callback, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64SumObserver calls `Meter.NewInt64SumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (mm MeterMust) NewInt64SumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64SumObserver {
+ if inst, err := mm.meter.NewInt64SumObserver(name, callback, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64SumObserver calls `Meter.NewFloat64SumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (mm MeterMust) NewFloat64SumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64SumObserver {
+ if inst, err := mm.meter.NewFloat64SumObserver(name, callback, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64UpDownSumObserver calls `Meter.NewInt64UpDownSumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (mm MeterMust) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownSumObserver {
+ if inst, err := mm.meter.NewInt64UpDownSumObserver(name, callback, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64UpDownSumObserver calls `Meter.NewFloat64UpDownSumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (mm MeterMust) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownSumObserver {
+ if inst, err := mm.meter.NewFloat64UpDownSumObserver(name, callback, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewBatchObserver returns a wrapper around BatchObserver that panics
+// when any instrument constructor returns an error.
+func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust {
+ return BatchObserverMust{
+ batch: mm.meter.NewBatchObserver(callback),
+ }
+}
+
+// NewInt64ValueObserver calls `BatchObserver.NewInt64ValueObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (bm BatchObserverMust) NewInt64ValueObserver(name string, oos ...InstrumentOption) Int64ValueObserver {
+ if inst, err := bm.batch.NewInt64ValueObserver(name, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64ValueObserver calls `BatchObserver.NewFloat64ValueObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (bm BatchObserverMust) NewFloat64ValueObserver(name string, oos ...InstrumentOption) Float64ValueObserver {
+ if inst, err := bm.batch.NewFloat64ValueObserver(name, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64SumObserver calls `BatchObserver.NewInt64SumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (bm BatchObserverMust) NewInt64SumObserver(name string, oos ...InstrumentOption) Int64SumObserver {
+ if inst, err := bm.batch.NewInt64SumObserver(name, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64SumObserver calls `BatchObserver.NewFloat64SumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (bm BatchObserverMust) NewFloat64SumObserver(name string, oos ...InstrumentOption) Float64SumObserver {
+ if inst, err := bm.batch.NewFloat64SumObserver(name, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewInt64UpDownSumObserver calls `BatchObserver.NewInt64UpDownSumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (bm BatchObserverMust) NewInt64UpDownSumObserver(name string, oos ...InstrumentOption) Int64UpDownSumObserver {
+ if inst, err := bm.batch.NewInt64UpDownSumObserver(name, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
+
+// NewFloat64UpDownSumObserver calls `BatchObserver.NewFloat64UpDownSumObserver` and
+// returns the instrument, panicking if it encounters an error.
+func (bm BatchObserverMust) NewFloat64UpDownSumObserver(name string, oos ...InstrumentOption) Float64UpDownSumObserver {
+ if inst, err := bm.batch.NewFloat64UpDownSumObserver(name, oos...); err != nil {
+ panic(err)
+ } else {
+ return inst
+ }
+}
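
A short, hypothetical sketch of the panic-on-error wrappers above; the Add call on the returned counter is assumed from the instrument types defined elsewhere in this package:

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	meter := metric.NoopMeterProvider{}.Meter("github.com/opencord/voltha-go/example")

	// Must turns constructor errors into panics, which keeps package-level
	// instrument declarations terse when a bad name is a programming error.
	must := metric.Must(meter)
	jobs := must.NewInt64Counter("jobs.completed")

	jobs.Add(context.Background(), 1)
}
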
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/noop.go b/vendor/go.opentelemetry.io/otel/api/metric/noop.go
new file mode 100644
index 0000000..97867d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/noop.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+type NoopMeterProvider struct{}
+
+type noopInstrument struct{}
+type noopBoundInstrument struct{}
+type NoopSync struct{ noopInstrument }
+type NoopAsync struct{ noopInstrument }
+
+var _ MeterProvider = NoopMeterProvider{}
+var _ SyncImpl = NoopSync{}
+var _ BoundSyncImpl = noopBoundInstrument{}
+var _ AsyncImpl = NoopAsync{}
+
+func (NoopMeterProvider) Meter(_ string, _ ...MeterOption) Meter {
+ return Meter{}
+}
+
+func (noopInstrument) Implementation() interface{} {
+ return nil
+}
+
+func (noopInstrument) Descriptor() Descriptor {
+ return Descriptor{}
+}
+
+func (noopBoundInstrument) RecordOne(context.Context, Number) {
+}
+
+func (noopBoundInstrument) Unbind() {
+}
+
+func (NoopSync) Bind([]label.KeyValue) BoundSyncImpl {
+ return noopBoundInstrument{}
+}
+
+func (NoopSync) RecordOne(context.Context, Number, []label.KeyValue) {
+}
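
The no-op types above back the uninitialized Meter mentioned in meter.go. A tiny, hypothetical sketch; the Add method on the counter is assumed from the instrument definitions elsewhere in the package:

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	var meter metric.Meter // zero value: impl == nil

	// Constructors on a zero-value Meter return noop-backed instruments
	// rather than failing, so this records nothing but is safe to call.
	c, _ := meter.NewInt64Counter("requests.total")
	c.Add(context.Background(), 1)
}
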
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/number.go b/vendor/go.opentelemetry.io/otel/api/metric/number.go
new file mode 100644
index 0000000..c3ca0ed
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/number.go
@@ -0,0 +1,540 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+//go:generate stringer -type=NumberKind
+
+import (
+ "fmt"
+ "math"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/internal"
+)
+
+// NumberKind describes the data type of the Number.
+type NumberKind int8
+
+const (
+ // Int64NumberKind means that the Number stores int64.
+ Int64NumberKind NumberKind = iota
+ // Float64NumberKind means that the Number stores float64.
+ Float64NumberKind
+)
+
+// Zero returns a zero value for a given NumberKind
+func (k NumberKind) Zero() Number {
+ switch k {
+ case Int64NumberKind:
+ return NewInt64Number(0)
+ case Float64NumberKind:
+ return NewFloat64Number(0.)
+ default:
+ return Number(0)
+ }
+}
+
+// Minimum returns the minimum representable value
+// for a given NumberKind
+func (k NumberKind) Minimum() Number {
+ switch k {
+ case Int64NumberKind:
+ return NewInt64Number(math.MinInt64)
+ case Float64NumberKind:
+ return NewFloat64Number(-1. * math.MaxFloat64)
+ default:
+ return Number(0)
+ }
+}
+
+// Maximum returns the maximum representable value
+// for a given NumberKind
+func (k NumberKind) Maximum() Number {
+ switch k {
+ case Int64NumberKind:
+ return NewInt64Number(math.MaxInt64)
+ case Float64NumberKind:
+ return NewFloat64Number(math.MaxFloat64)
+ default:
+ return Number(0)
+ }
+}
+
+// Number represents either an integral or a floating point value. It
+// needs to be accompanied with a source of NumberKind that describes
+// the actual type of the value stored within Number.
+type Number uint64
+
+// - constructors
+
+// NewNumberFromRaw creates a new Number from a raw value.
+func NewNumberFromRaw(r uint64) Number {
+ return Number(r)
+}
+
+// NewInt64Number creates an integral Number.
+func NewInt64Number(i int64) Number {
+ return NewNumberFromRaw(internal.Int64ToRaw(i))
+}
+
+// NewFloat64Number creates a floating point Number.
+func NewFloat64Number(f float64) Number {
+ return NewNumberFromRaw(internal.Float64ToRaw(f))
+}
+
+// NewNumberSignChange returns a number with the same magnitude and
+// the opposite sign. `kind` must describe the kind of number in `nn`.
+//
+// Numbers of any other kind are returned unchanged.
+func NewNumberSignChange(kind NumberKind, nn Number) Number {
+ switch kind {
+ case Int64NumberKind:
+ return NewInt64Number(-nn.AsInt64())
+ case Float64NumberKind:
+ return NewFloat64Number(-nn.AsFloat64())
+ }
+ return nn
+}
+
+// - as x
+
+// AsNumber gets the Number.
+func (n *Number) AsNumber() Number {
+ return *n
+}
+
+// AsRaw gets the uninterpreted raw value. Might be useful for some
+// atomic operations.
+func (n *Number) AsRaw() uint64 {
+ return uint64(*n)
+}
+
+// AsInt64 assumes that the value contains an int64 and returns it as
+// such.
+func (n *Number) AsInt64() int64 {
+ return internal.RawToInt64(n.AsRaw())
+}
+
+// AsFloat64 assumes that the measurement value contains a float64 and
+// returns it as such.
+func (n *Number) AsFloat64() float64 {
+ return internal.RawToFloat64(n.AsRaw())
+}
+
+// - as x atomic
+
+// AsNumberAtomic gets the Number atomically.
+func (n *Number) AsNumberAtomic() Number {
+ return NewNumberFromRaw(n.AsRawAtomic())
+}
+
+// AsRawAtomic gets the uninterpreted raw value atomically. Might be
+// useful for some atomic operations.
+func (n *Number) AsRawAtomic() uint64 {
+ return atomic.LoadUint64(n.AsRawPtr())
+}
+
+// AsInt64Atomic assumes that the number contains an int64 and returns
+// it as such atomically.
+func (n *Number) AsInt64Atomic() int64 {
+ return atomic.LoadInt64(n.AsInt64Ptr())
+}
+
+// AsFloat64Atomic assumes that the measurement value contains a
+// float64 and returns it as such atomically.
+func (n *Number) AsFloat64Atomic() float64 {
+ return internal.RawToFloat64(n.AsRawAtomic())
+}
+
+// - as x ptr
+
+// AsRawPtr gets the pointer to the raw, uninterpreted
+// value. Might be useful for some atomic operations.
+func (n *Number) AsRawPtr() *uint64 {
+ return (*uint64)(n)
+}
+
+// AsInt64Ptr assumes that the number contains an int64 and returns a
+// pointer to it.
+func (n *Number) AsInt64Ptr() *int64 {
+ return internal.RawPtrToInt64Ptr(n.AsRawPtr())
+}
+
+// AsFloat64Ptr assumes that the number contains a float64 and returns a
+// pointer to it.
+func (n *Number) AsFloat64Ptr() *float64 {
+ return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
+}
+
+// - coerce
+
+// CoerceToInt64 casts the number to int64. May result in
+// data/precision loss.
+func (n *Number) CoerceToInt64(kind NumberKind) int64 {
+ switch kind {
+ case Int64NumberKind:
+ return n.AsInt64()
+ case Float64NumberKind:
+ return int64(n.AsFloat64())
+ default:
+ // you get what you deserve
+ return 0
+ }
+}
+
+// CoerceToFloat64 casts the number to float64. May result in
+// data/precision loss.
+func (n *Number) CoerceToFloat64(kind NumberKind) float64 {
+ switch kind {
+ case Int64NumberKind:
+ return float64(n.AsInt64())
+ case Float64NumberKind:
+ return n.AsFloat64()
+ default:
+ // you get what you deserve
+ return 0
+ }
+}
+
+// - set
+
+// SetNumber sets the number to the passed number. Both should be of
+// the same kind.
+func (n *Number) SetNumber(nn Number) {
+ *n.AsRawPtr() = nn.AsRaw()
+}
+
+// SetRaw sets the number to the passed raw value. Both number and the
+// raw number should represent the same kind.
+func (n *Number) SetRaw(r uint64) {
+ *n.AsRawPtr() = r
+}
+
+// SetInt64 assumes that the number contains an int64 and sets it to
+// the passed value.
+func (n *Number) SetInt64(i int64) {
+ *n.AsInt64Ptr() = i
+}
+
+// SetFloat64 assumes that the number contains a float64 and sets it
+// to the passed value.
+func (n *Number) SetFloat64(f float64) {
+ *n.AsFloat64Ptr() = f
+}
+
+// - set atomic
+
+// SetNumberAtomic sets the number to the passed number
+// atomically. Both should be of the same kind.
+func (n *Number) SetNumberAtomic(nn Number) {
+ atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
+}
+
+// SetRawAtomic sets the number to the passed raw value
+// atomically. Both number and the raw number should represent the
+// same kind.
+func (n *Number) SetRawAtomic(r uint64) {
+ atomic.StoreUint64(n.AsRawPtr(), r)
+}
+
+// SetInt64Atomic assumes that the number contains an int64 and sets
+// it to the passed value atomically.
+func (n *Number) SetInt64Atomic(i int64) {
+ atomic.StoreInt64(n.AsInt64Ptr(), i)
+}
+
+// SetFloat64Atomic assumes that the number contains a float64 and
+// sets it to the passed value atomically.
+func (n *Number) SetFloat64Atomic(f float64) {
+ atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
+}
+
+// - swap
+
+// SwapNumber sets the number to the passed number and returns the old
+// number. Both this number and the passed number should be of the
+// same kind.
+func (n *Number) SwapNumber(nn Number) Number {
+ old := *n
+ n.SetNumber(nn)
+ return old
+}
+
+// SwapRaw sets the number to the passed raw value and returns the old
+// raw value. Both number and the raw number should represent the same
+// kind.
+func (n *Number) SwapRaw(r uint64) uint64 {
+ old := n.AsRaw()
+ n.SetRaw(r)
+ return old
+}
+
+// SwapInt64 assumes that the number contains an int64, sets it to the
+// passed value and returns the old int64 value.
+func (n *Number) SwapInt64(i int64) int64 {
+ old := n.AsInt64()
+ n.SetInt64(i)
+ return old
+}
+
+// SwapFloat64 assumes that the number contains a float64, sets it to
+// the passed value and returns the old float64 value.
+func (n *Number) SwapFloat64(f float64) float64 {
+ old := n.AsFloat64()
+ n.SetFloat64(f)
+ return old
+}
+
+// - swap atomic
+
+// SwapNumberAtomic sets the number to the passed number and returns
+// the old number atomically. Both this number and the passed number
+// should be of the same kind.
+func (n *Number) SwapNumberAtomic(nn Number) Number {
+ return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
+}
+
+// SwapRawAtomic sets the number to the passed raw value and returns
+// the old raw value atomically. Both number and the raw number should
+// represent the same kind.
+func (n *Number) SwapRawAtomic(r uint64) uint64 {
+ return atomic.SwapUint64(n.AsRawPtr(), r)
+}
+
+// SwapInt64Atomic assumes that the number contains an int64, sets it
+// to the passed value and returns the old int64 value atomically.
+func (n *Number) SwapInt64Atomic(i int64) int64 {
+ return atomic.SwapInt64(n.AsInt64Ptr(), i)
+}
+
+// SwapFloat64Atomic assumes that the number contains a float64, sets
+// it to the passed value and returns the old float64 value
+// atomically.
+func (n *Number) SwapFloat64Atomic(f float64) float64 {
+ return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
+}
+
+// - add
+
+// AddNumber assumes that this and the passed number are of the passed
+// kind and adds the passed number to this number.
+func (n *Number) AddNumber(kind NumberKind, nn Number) {
+ switch kind {
+ case Int64NumberKind:
+ n.AddInt64(nn.AsInt64())
+ case Float64NumberKind:
+ n.AddFloat64(nn.AsFloat64())
+ }
+}
+
+// AddRaw assumes that this number and the passed raw value are of the
+// passed kind and adds the passed raw value to this number.
+func (n *Number) AddRaw(kind NumberKind, r uint64) {
+ n.AddNumber(kind, NewNumberFromRaw(r))
+}
+
+// AddInt64 assumes that the number contains an int64 and adds the
+// passed int64 to it.
+func (n *Number) AddInt64(i int64) {
+ *n.AsInt64Ptr() += i
+}
+
+// AddFloat64 assumes that the number contains a float64 and adds the
+// passed float64 to it.
+func (n *Number) AddFloat64(f float64) {
+ *n.AsFloat64Ptr() += f
+}
+
+// - add atomic
+
+// AddNumberAtomic assumes that this and the passed number are of the
+// passed kind and adds the passed number to this number atomically.
+func (n *Number) AddNumberAtomic(kind NumberKind, nn Number) {
+ switch kind {
+ case Int64NumberKind:
+ n.AddInt64Atomic(nn.AsInt64())
+ case Float64NumberKind:
+ n.AddFloat64Atomic(nn.AsFloat64())
+ }
+}
+
+// AddRawAtomic assumes that this number and the passed raw value are
+// of the passed kind and adds the passed raw value to this number
+// atomically.
+func (n *Number) AddRawAtomic(kind NumberKind, r uint64) {
+ n.AddNumberAtomic(kind, NewNumberFromRaw(r))
+}
+
+// AddInt64Atomic assumes that the number contains an int64 and adds
+// the passed int64 to it atomically.
+func (n *Number) AddInt64Atomic(i int64) {
+ atomic.AddInt64(n.AsInt64Ptr(), i)
+}
+
+// AddFloat64Atomic assumes that the number contains a float64 and
+// adds the passed float64 to it atomically.
+func (n *Number) AddFloat64Atomic(f float64) {
+ for {
+ o := n.AsFloat64Atomic()
+ if n.CompareAndSwapFloat64(o, o+f) {
+ break
+ }
+ }
+}
+
+// - compare and swap (atomic only)
+
+// CompareAndSwapNumber does the atomic CAS operation on this
+// number. This number and passed old and new numbers should be of the
+// same kind.
+func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
+ return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
+}
+
+// CompareAndSwapRaw does the atomic CAS operation on this
+// number. This number and passed old and new raw values should be of
+// the same kind.
+func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
+ return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
+}
+
+// CompareAndSwapInt64 assumes that this number contains an int64 and
+// does the atomic CAS operation on it.
+func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
+ return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
+}
+
+// CompareAndSwapFloat64 assumes that this number contains a float64 and
+// does the atomic CAS operation on it.
+func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
+ return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
+}
+
+// - compare
+
+// CompareNumber compares two Numbers given their kind. Both numbers
+// should have the same kind. This returns:
+// 0 if the numbers are equal
+// -1 if the subject `n` is less than the argument `nn`
+// +1 if the subject `n` is greater than the argument `nn`
+func (n *Number) CompareNumber(kind NumberKind, nn Number) int {
+ switch kind {
+ case Int64NumberKind:
+ return n.CompareInt64(nn.AsInt64())
+ case Float64NumberKind:
+ return n.CompareFloat64(nn.AsFloat64())
+ default:
+ // you get what you deserve
+ return 0
+ }
+}
+
+// CompareRaw compares two numbers, where one is input as a raw
+// uint64, interpreting both values as a `kind` of number.
+func (n *Number) CompareRaw(kind NumberKind, r uint64) int {
+ return n.CompareNumber(kind, NewNumberFromRaw(r))
+}
+
+// CompareInt64 assumes that the Number contains an int64 and performs
+// a comparison between the value and the other value. It returns the
+// typical result of the compare function: -1 if the value is less
+// than the other, 0 if both are equal, 1 if the value is greater than
+// the other.
+func (n *Number) CompareInt64(i int64) int {
+ this := n.AsInt64()
+ if this < i {
+ return -1
+ } else if this > i {
+ return 1
+ }
+ return 0
+}
+
+// CompareFloat64 assumes that the Number contains a float64 and
+// performs a comparison between the value and the other value. It
+// returns the typical result of the compare function: -1 if the value
+// is less than the other, 0 if both are equal, 1 if the value is
+// greater than the other.
+//
+// Do not compare NaN values.
+func (n *Number) CompareFloat64(f float64) int {
+ this := n.AsFloat64()
+ if this < f {
+ return -1
+ } else if this > f {
+ return 1
+ }
+ return 0
+}
+
+// - relations to zero
+
+// IsPositive returns true if the actual value is greater than zero.
+func (n *Number) IsPositive(kind NumberKind) bool {
+ return n.compareWithZero(kind) > 0
+}
+
+// IsNegative returns true if the actual value is less than zero.
+func (n *Number) IsNegative(kind NumberKind) bool {
+ return n.compareWithZero(kind) < 0
+}
+
+// IsZero returns true if the actual value is equal to zero.
+func (n *Number) IsZero(kind NumberKind) bool {
+ return n.compareWithZero(kind) == 0
+}
+
+// - misc
+
+// Emit returns a string representation of the raw value of the
+// Number. A %d is used for integral values, %f for floating point
+// values.
+func (n *Number) Emit(kind NumberKind) string {
+ switch kind {
+ case Int64NumberKind:
+ return fmt.Sprintf("%d", n.AsInt64())
+ case Float64NumberKind:
+ return fmt.Sprintf("%f", n.AsFloat64())
+ default:
+ return ""
+ }
+}
+
+// AsInterface returns the number as an interface{}, typically used
+// for NumberKind-correct JSON conversion.
+func (n *Number) AsInterface(kind NumberKind) interface{} {
+ switch kind {
+ case Int64NumberKind:
+ return n.AsInt64()
+ case Float64NumberKind:
+ return n.AsFloat64()
+ default:
+ return math.NaN()
+ }
+}
+
+// - private stuff
+
+func (n *Number) compareWithZero(kind NumberKind) int {
+ switch kind {
+ case Int64NumberKind:
+ return n.CompareInt64(0)
+ case Float64NumberKind:
+ return n.CompareFloat64(0.)
+ default:
+ // you get what you deserve
+ return 0
+ }
+}
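
A minimal sketch of the Number type above: the value is a raw uint64 bit pattern and the caller supplies the NumberKind that says how to interpret it. This is an illustrative example, not part of the vendored package, and it uses only functions defined in this file:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	// Store a float64 in the raw uint64 and add to it atomically.
	n := metric.NewFloat64Number(1.5)
	n.AddFloat64Atomic(2.5)
	fmt.Println(n.Emit(metric.Float64NumberKind)) // 4.000000

	// Comparison is also kind-directed.
	i := metric.NewInt64Number(10)
	fmt.Println(i.CompareNumber(metric.Int64NumberKind, metric.NewInt64Number(3))) // 1
}
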
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go b/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go
new file mode 100644
index 0000000..e99a874
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=NumberKind"; DO NOT EDIT.
+
+package metric
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Int64NumberKind-0]
+ _ = x[Float64NumberKind-1]
+}
+
+const _NumberKind_name = "Int64NumberKindFloat64NumberKind"
+
+var _NumberKind_index = [...]uint8{0, 15, 32}
+
+func (i NumberKind) String() string {
+ if i < 0 || i >= NumberKind(len(_NumberKind_index)-1) {
+ return "NumberKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _NumberKind_name[_NumberKind_index[i]:_NumberKind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/observer.go b/vendor/go.opentelemetry.io/otel/api/metric/observer.go
new file mode 100644
index 0000000..c347da7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/observer.go
@@ -0,0 +1,124 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// BatchObserver represents an Observer callback that can report
+// observations for multiple instruments.
+type BatchObserver struct {
+ meter Meter
+ runner AsyncBatchRunner
+}
+
+// Int64ValueObserver is a metric that captures a set of int64 values at a
+// point in time.
+type Int64ValueObserver struct {
+ asyncInstrument
+}
+
+// Float64ValueObserver is a metric that captures a set of float64 values
+// at a point in time.
+type Float64ValueObserver struct {
+ asyncInstrument
+}
+
+// Int64SumObserver is a metric that captures a precomputed sum of
+// int64 values at a point in time.
+type Int64SumObserver struct {
+ asyncInstrument
+}
+
+// Float64SumObserver is a metric that captures a precomputed sum of
+// float64 values at a point in time.
+type Float64SumObserver struct {
+ asyncInstrument
+}
+
+// Int64UpDownSumObserver is a metric that captures a precomputed sum of
+// int64 values at a point in time.
+type Int64UpDownSumObserver struct {
+ asyncInstrument
+}
+
+// Float64UpDownSumObserver is a metric that captures a precomputed sum of
+// float64 values at a point in time.
+type Float64UpDownSumObserver struct {
+ asyncInstrument
+}
+
+// Observation returns an Observation, a BatchObserverFunc
+// argument, for an asynchronous integer instrument.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (i Int64ValueObserver) Observation(v int64) Observation {
+ return Observation{
+ number: NewInt64Number(v),
+ instrument: i.instrument,
+ }
+}
+
+// Observation returns an Observation, a BatchObserverFunc
+// argument, for an asynchronous floating point instrument.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (f Float64ValueObserver) Observation(v float64) Observation {
+ return Observation{
+ number: NewFloat64Number(v),
+ instrument: f.instrument,
+ }
+}
+
+// Observation returns an Observation, a BatchObserverFunc
+// argument, for an asynchronous integer instrument.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (i Int64SumObserver) Observation(v int64) Observation {
+ return Observation{
+ number: NewInt64Number(v),
+ instrument: i.instrument,
+ }
+}
+
+// Observation returns an Observation, a BatchObserverFunc
+// argument, for an asynchronous floating point instrument.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (f Float64SumObserver) Observation(v float64) Observation {
+ return Observation{
+ number: NewFloat64Number(v),
+ instrument: f.instrument,
+ }
+}
+
+// Observation returns an Observation, a BatchObserverFunc
+// argument, for an asynchronous integer instrument.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (i Int64UpDownSumObserver) Observation(v int64) Observation {
+ return Observation{
+ number: NewInt64Number(v),
+ instrument: i.instrument,
+ }
+}
+
+// Observation returns an Observation, a BatchObserverFunc
+// argument, for an asynchronous floating point instrument.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (f Float64UpDownSumObserver) Observation(v float64) Observation {
+ return Observation{
+ number: NewFloat64Number(v),
+ instrument: f.instrument,
+ }
+}
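
The Observation helpers above are meant to be used inside a batch callback. A hypothetical sketch follows; the BatchObserverFunc signature and the BatchObserverResult.Observe method live in async.go, and their exact shapes are assumed here:

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

func main() {
	meter := metric.NoopMeterProvider{}.Meter("github.com/opencord/voltha-go/example")

	var queueLen metric.Int64ValueObserver

	// One callback reports observations for several instruments at once.
	batch := meter.NewBatchObserver(func(_ context.Context, result metric.BatchObserverResult) {
		result.Observe(
			[]label.KeyValue{label.String("queue", "flows")},
			queueLen.Observation(42),
		)
	})

	var err error
	queueLen, err = batch.NewInt64ValueObserver("queue.length")
	if err != nil {
		panic(err)
	}
}
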
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go
new file mode 100644
index 0000000..ed9eccc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go
@@ -0,0 +1,170 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry // import "go.opentelemetry.io/otel/api/metric/registry"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/api/metric"
+ "go.opentelemetry.io/otel/label"
+)
+
+// MeterProvider is a standard MeterProvider for wrapping `MeterImpl`
+type MeterProvider struct {
+ impl metric.MeterImpl
+}
+
+var _ metric.MeterProvider = (*MeterProvider)(nil)
+
+// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
+// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeterImpl
+// to wrap an implementation with uniqueness checking.
+type uniqueInstrumentMeterImpl struct {
+ lock sync.Mutex
+ impl metric.MeterImpl
+ state map[key]metric.InstrumentImpl
+}
+
+var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
+
+type key struct {
+ instrumentName string
+ instrumentationName string
+ InstrumentationVersion string
+}
+
+// NewMeterProvider returns a new provider that implements instrument
+// name-uniqueness checking.
+func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
+ return &MeterProvider{
+ impl: NewUniqueInstrumentMeterImpl(impl),
+ }
+}
+
+// Meter implements MeterProvider.
+func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
+ return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
+}
+
+// ErrMetricKindMismatch is the standard error for mismatched metric
+// instrument definitions.
+var ErrMetricKindMismatch = fmt.Errorf(
+ "A metric was already registered by this name with another kind or number type")
+
+// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
+// the addition of uniqueness checking.
+func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
+ return &uniqueInstrumentMeterImpl{
+ impl: impl,
+ state: map[key]metric.InstrumentImpl{},
+ }
+}
+
+// RecordBatch implements metric.MeterImpl.
+func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, ms ...metric.Measurement) {
+ u.impl.RecordBatch(ctx, labels, ms...)
+}
+
+func keyOf(descriptor metric.Descriptor) key {
+ return key{
+ descriptor.Name(),
+ descriptor.InstrumentationName(),
+ descriptor.InstrumentationVersion(),
+ }
+}
+
+// NewMetricKindMismatchError formats an error that describes a
+// mismatched metric instrument definition.
+func NewMetricKindMismatchError(desc metric.Descriptor) error {
+ return fmt.Errorf("Metric was %s (%s %s) registered as a %s %s: %w",
+ desc.Name(),
+ desc.InstrumentationName(),
+ desc.InstrumentationVersion(),
+ desc.NumberKind(),
+ desc.MetricKind(),
+ ErrMetricKindMismatch)
+}
+
+// Compatible determines whether two metric.Descriptors are considered
+// the same for the purpose of uniqueness checking.
+func Compatible(candidate, existing metric.Descriptor) bool {
+ return candidate.MetricKind() == existing.MetricKind() &&
+ candidate.NumberKind() == existing.NumberKind()
+}
+
+// checkUniqueness returns an ErrMetricKindMismatch error if there is
+// a conflict between a descriptor that was already registered and the
+// `descriptor` argument. If there is an existing compatible
+// registration, this returns the already-registered instrument. If
+// there is no conflict and no prior registration, returns (nil, nil).
+func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
+ impl, ok := u.state[keyOf(descriptor)]
+ if !ok {
+ return nil, nil
+ }
+
+ if !Compatible(descriptor, impl.Descriptor()) {
+ return nil, NewMetricKindMismatchError(impl.Descriptor())
+ }
+
+ return impl, nil
+}
+
+// NewSyncInstrument implements metric.MeterImpl.
+func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+
+ impl, err := u.checkUniqueness(descriptor)
+
+ if err != nil {
+ return nil, err
+ } else if impl != nil {
+ return impl.(metric.SyncImpl), nil
+ }
+
+ syncInst, err := u.impl.NewSyncInstrument(descriptor)
+ if err != nil {
+ return nil, err
+ }
+ u.state[keyOf(descriptor)] = syncInst
+ return syncInst, nil
+}
+
+// NewAsyncInstrument implements metric.MeterImpl.
+func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
+ descriptor metric.Descriptor,
+ runner metric.AsyncRunner,
+) (metric.AsyncImpl, error) {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+
+ impl, err := u.checkUniqueness(descriptor)
+
+ if err != nil {
+ return nil, err
+ } else if impl != nil {
+ return impl.(metric.AsyncImpl), nil
+ }
+
+ asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner)
+ if err != nil {
+ return nil, err
+ }
+ u.state[keyOf(descriptor)] = asyncInst
+ return asyncInst, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go b/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go
new file mode 100644
index 0000000..122c9ba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// MeterImpl is the interface an SDK must implement to supply a Meter
+// implementation.
+type MeterImpl interface {
+ // RecordBatch atomically records a batch of measurements.
+ RecordBatch(ctx context.Context, labels []label.KeyValue, measurement ...Measurement)
+
+ // NewSyncInstrument returns a newly constructed
+ // synchronous instrument implementation or an error, should
+ // one occur.
+ NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
+
+ // NewAsyncInstrument returns a newly constructed
+ // asynchronous instrument implementation or an error, should
+ // one occur.
+ NewAsyncInstrument(
+ descriptor Descriptor,
+ runner AsyncRunner,
+ ) (AsyncImpl, error)
+}
+
+// InstrumentImpl is a common interface for synchronous and
+// asynchronous instruments.
+type InstrumentImpl interface {
+ // Implementation returns the underlying implementation of the
+ // instrument, which allows the implementation to gain access
+ // to its own representation especially from a `Measurement`.
+ Implementation() interface{}
+
+ // Descriptor returns a copy of the instrument's Descriptor.
+ Descriptor() Descriptor
+}
+
+// SyncImpl is the implementation-level interface to a generic
+// synchronous instrument (e.g., ValueRecorder and Counter instruments).
+type SyncImpl interface {
+ InstrumentImpl
+
+ // Bind creates an implementation-level bound instrument,
+ // binding a label set with this instrument implementation.
+ Bind(labels []label.KeyValue) BoundSyncImpl
+
+ // RecordOne captures a single synchronous metric event.
+ RecordOne(ctx context.Context, number Number, labels []label.KeyValue)
+}
+
+// BoundSyncImpl is the implementation-level interface to a
+// generic bound synchronous instrument
+type BoundSyncImpl interface {
+
+ // RecordOne captures a single synchronous metric event.
+ RecordOne(ctx context.Context, number Number)
+
+ // Unbind frees the resources associated with this bound instrument. It
+ // does not affect the metric this bound instrument was created through.
+ Unbind()
+}
+
+// AsyncImpl is an implementation-level interface to an
+// asynchronous instrument (e.g., Observer instruments).
+type AsyncImpl interface {
+ InstrumentImpl
+}
+
+// WrapMeterImpl constructs a `Meter` implementation from a
+// `MeterImpl` implementation.
+func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
+ return Meter{
+ impl: impl,
+ name: instrumentationName,
+ version: NewMeterConfig(opts...).InstrumentationVersion,
+ }
+}
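
MeterImpl is the seam between this API and an SDK. Below is a hypothetical, self-contained sketch of an implementation that only logs what it is asked to build; it is not part of the vendored code. The Descriptor accessors it uses appear in registry.go, the no-op instruments come from noop.go, and a real SDK would return its own SyncImpl/AsyncImpl values:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

// debugImpl is a hypothetical MeterImpl used only for illustration.
type debugImpl struct{}

func (debugImpl) RecordBatch(_ context.Context, _ []label.KeyValue, ms ...metric.Measurement) {
	log.Printf("recording %d measurements", len(ms))
}

func (debugImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
	log.Printf("new sync instrument %q (%s, %s)", desc.Name(), desc.MetricKind(), desc.NumberKind())
	return metric.NoopSync{}, nil
}

func (debugImpl) NewAsyncInstrument(desc metric.Descriptor, _ metric.AsyncRunner) (metric.AsyncImpl, error) {
	log.Printf("new async instrument %q", desc.Name())
	return metric.NoopAsync{}, nil
}

func main() {
	// WrapMeterImpl (above) turns the implementation into a user-facing Meter.
	meter := metric.WrapMeterImpl(debugImpl{}, "github.com/opencord/voltha-go/example")
	if _, err := meter.NewInt64Counter("requests.total"); err != nil {
		log.Fatal(err)
	}
}
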
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/sync.go b/vendor/go.opentelemetry.io/otel/api/metric/sync.go
new file mode 100644
index 0000000..a08a65b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/sync.go
@@ -0,0 +1,192 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil.
+var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation")
+
+// Measurement is used for reporting a synchronous batch of metric
+// values. Instances of this type should be created by synchronous
+// instruments (e.g., Int64Counter.Measurement()).
+type Measurement struct {
+ // number needs to be aligned for 64-bit atomic operations.
+ number Number
+ instrument SyncImpl
+}
+
+// syncInstrument contains a SyncImpl.
+type syncInstrument struct {
+ instrument SyncImpl
+}
+
+// syncBoundInstrument contains a BoundSyncImpl.
+type syncBoundInstrument struct {
+ boundInstrument BoundSyncImpl
+}
+
+// asyncInstrument contains an AsyncImpl.
+type asyncInstrument struct {
+ instrument AsyncImpl
+}
+
+// SyncImpl returns the instrument that created this measurement.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (m Measurement) SyncImpl() SyncImpl {
+ return m.instrument
+}
+
+// Number returns a number recorded in this measurement.
+func (m Measurement) Number() Number {
+ return m.number
+}
+
+// AsyncImpl returns the instrument that created this observation.
+// This returns an implementation-level object for use by the SDK;
+// users should not refer to this.
+func (m Observation) AsyncImpl() AsyncImpl {
+ return m.instrument
+}
+
+// Number returns a number recorded in this observation.
+func (m Observation) Number() Number {
+ return m.number
+}
+
+// AsyncImpl implements AsyncImpl.
+func (a asyncInstrument) AsyncImpl() AsyncImpl {
+ return a.instrument
+}
+
+// SyncImpl returns the implementation object for synchronous instruments.
+func (s syncInstrument) SyncImpl() SyncImpl {
+ return s.instrument
+}
+
+func (s syncInstrument) bind(labels []label.KeyValue) syncBoundInstrument {
+ return newSyncBoundInstrument(s.instrument.Bind(labels))
+}
+
+func (s syncInstrument) float64Measurement(value float64) Measurement {
+ return newMeasurement(s.instrument, NewFloat64Number(value))
+}
+
+func (s syncInstrument) int64Measurement(value int64) Measurement {
+ return newMeasurement(s.instrument, NewInt64Number(value))
+}
+
+func (s syncInstrument) directRecord(ctx context.Context, number Number, labels []label.KeyValue) {
+ s.instrument.RecordOne(ctx, number, labels)
+}
+
+func (h syncBoundInstrument) directRecord(ctx context.Context, number Number) {
+ h.boundInstrument.RecordOne(ctx, number)
+}
+
+// Unbind calls SyncImpl.Unbind.
+func (h syncBoundInstrument) Unbind() {
+ h.boundInstrument.Unbind()
+}
+
+// checkNewAsync receives an AsyncImpl and potential
+// error, and returns the same types, checking for and ensuring that
+// the returned interface is not nil.
+func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) {
+ if instrument == nil {
+ if err == nil {
+ err = ErrSDKReturnedNilImpl
+ }
+ instrument = NoopAsync{}
+ }
+ return asyncInstrument{
+ instrument: instrument,
+ }, err
+}
+
+// checkNewSync receives a SyncImpl and potential
+// error, and returns the same types, checking for and ensuring that
+// the returned interface is not nil.
+func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) {
+ if instrument == nil {
+ if err == nil {
+ err = ErrSDKReturnedNilImpl
+ }
+ // Note: an alternate behavior would be to synthesize a new name
+ // or group all duplicately-named instruments of a certain type
+ // together and use a tag for the original name, e.g.,
+ // name = 'invalid.counter.int64'
+ // label = 'original-name=duplicate-counter-name'
+ instrument = NoopSync{}
+ }
+ return syncInstrument{
+ instrument: instrument,
+ }, err
+}
+
+func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument {
+ return syncBoundInstrument{
+ boundInstrument: boundInstrument,
+ }
+}
+
+func newMeasurement(instrument SyncImpl, number Number) Measurement {
+ return Measurement{
+ instrument: instrument,
+ number: number,
+ }
+}
+
+// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
+func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) {
+ common, err := checkNewSync(syncInst, err)
+ return Int64Counter{syncInstrument: common}, err
+}
+
+// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
+func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) {
+ common, err := checkNewSync(syncInst, err)
+ return Float64Counter{syncInstrument: common}, err
+}
+
+// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
+func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) {
+ common, err := checkNewSync(syncInst, err)
+ return Int64UpDownCounter{syncInstrument: common}, err
+}
+
+// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
+func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) {
+ common, err := checkNewSync(syncInst, err)
+ return Float64UpDownCounter{syncInstrument: common}, err
+}
+
+// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder.
+func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
+ common, err := checkNewSync(syncInst, err)
+ return Int64ValueRecorder{syncInstrument: common}, err
+}
+
+// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder.
+func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
+ common, err := checkNewSync(syncInst, err)
+ return Float64ValueRecorder{syncInstrument: common}, err
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go b/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go
new file mode 100644
index 0000000..1018246
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// Float64UpDownCounter is a metric instrument that sums floating
+// point values.
+type Float64UpDownCounter struct {
+ syncInstrument
+}
+
+// Int64UpDownCounter is a metric instrument that sums integer values.
+type Int64UpDownCounter struct {
+ syncInstrument
+}
+
+// BoundFloat64UpDownCounter is a bound instrument for Float64UpDownCounter.
+//
+// It inherits the Unbind function from syncBoundInstrument.
+type BoundFloat64UpDownCounter struct {
+ syncBoundInstrument
+}
+
+// BoundInt64UpDownCounter is a bound instrument for Int64UpDownCounter.
+//
+// It inherits the Unbind function from syncBoundInstrument.
+type BoundInt64UpDownCounter struct {
+ syncBoundInstrument
+}
+
+// Bind creates a bound instrument for this counter. The labels are
+// associated with values recorded via subsequent calls to Record.
+func (c Float64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundFloat64UpDownCounter) {
+ h.syncBoundInstrument = c.bind(labels)
+ return
+}
+
+// Bind creates a bound instrument for this counter. The labels are
+// associated with values recorded via subsequent calls to Record.
+func (c Int64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundInt64UpDownCounter) {
+ h.syncBoundInstrument = c.bind(labels)
+ return
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Float64UpDownCounter) Measurement(value float64) Measurement {
+ return c.float64Measurement(value)
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Int64UpDownCounter) Measurement(value int64) Measurement {
+ return c.int64Measurement(value)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
+ c.directRecord(ctx, NewFloat64Number(value), labels)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
+ c.directRecord(ctx, NewInt64Number(value), labels)
+}
+
+// Add adds the value to the counter's sum using the labels
+// previously bound to this counter via Bind()
+func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) {
+ b.directRecord(ctx, NewFloat64Number(value))
+}
+
+// Add adds the value to the counter's sum using the labels
+// previously bound to this counter via Bind()
+func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) {
+ b.directRecord(ctx, NewInt64Number(value))
+}
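For orientation, a minimal usage sketch of the UpDownCounter surface added above. This is not part of the patch: the counter is assumed to come from a Meter created elsewhere, and label.String is assumed to exist in the vendored label package.

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

// recordQueueDepth shows both recording paths of an Int64UpDownCounter:
// direct Add calls that pass labels every time, and a bound instrument
// that carries the labels across repeated calls.
func recordQueueDepth(ctx context.Context, counter metric.Int64UpDownCounter) {
	// Direct recording: labels are passed on every call.
	counter.Add(ctx, 1, label.String("queue", "ingest"))

	// Bound recording: bind the labels once, record many times, then Unbind.
	bound := counter.Bind(label.String("queue", "ingest"))
	defer bound.Unbind()
	bound.Add(ctx, -1)
}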
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go b/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go
new file mode 100644
index 0000000..fa7e2d4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go
@@ -0,0 +1,97 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+// Float64ValueRecorder is a metric that records float64 values.
+type Float64ValueRecorder struct {
+ syncInstrument
+}
+
+// Int64ValueRecorder is a metric that records int64 values.
+type Int64ValueRecorder struct {
+ syncInstrument
+}
+
+// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
+//
+// It inherits the Unbind function from syncBoundInstrument.
+type BoundFloat64ValueRecorder struct {
+ syncBoundInstrument
+}
+
+// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
+//
+// It inherits the Unbind function from syncBoundInstrument.
+type BoundInt64ValueRecorder struct {
+ syncBoundInstrument
+}
+
+// Bind creates a bound instrument for this ValueRecorder. The labels are
+// associated with values recorded via subsequent calls to Record.
+func (c Float64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundFloat64ValueRecorder) {
+ h.syncBoundInstrument = c.bind(labels)
+ return
+}
+
+// Bind creates a bound instrument for this ValueRecorder. The labels are
+// associated with values recorded via subsequent calls to Record.
+func (c Int64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundInt64ValueRecorder) {
+ h.syncBoundInstrument = c.bind(labels)
+ return
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Float64ValueRecorder) Measurement(value float64) Measurement {
+ return c.float64Measurement(value)
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Int64ValueRecorder) Measurement(value int64) Measurement {
+ return c.int64Measurement(value)
+}
+
+// Record adds a new value to the ValueRecorder's distribution. The
+// labels should contain the keys and values to be associated with
+// this value.
+func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...label.KeyValue) {
+ c.directRecord(ctx, NewFloat64Number(value), labels)
+}
+
+// Record adds a new value to the ValueRecorder's distribution. The
+// labels should contain the keys and values to be associated with
+// this value.
+func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...label.KeyValue) {
+ c.directRecord(ctx, NewInt64Number(value), labels)
+}
+
+// Record adds a new value to the ValueRecorder's distribution using the labels
+// previously bound to the ValueRecorder via Bind().
+func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
+ b.directRecord(ctx, NewFloat64Number(value))
+}
+
+// Record adds a new value to the ValueRecorder's distribution using the labels
+// previously bound to the ValueRecorder via Bind().
+func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
+ b.directRecord(ctx, NewInt64Number(value))
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/api.go b/vendor/go.opentelemetry.io/otel/api/trace/api.go
new file mode 100644
index 0000000..3f15f48
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/api.go
@@ -0,0 +1,314 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/label"
+)
+
+// TracerProvider provides access to instrumentation Tracers.
+type TracerProvider interface {
+ // Tracer creates an implementation of the Tracer interface.
+ // The instrumentationName must be the name of the library providing
+ // instrumentation. This name may be the same as the instrumented code
+ // only if that code provides built-in instrumentation. If the
+ // instrumentationName is empty, then an implementation-defined default
+ // name will be used instead.
+ Tracer(instrumentationName string, opts ...TracerOption) Tracer
+}
+
+// TracerConfig is a group of options for a Tracer.
+//
+// Most users will use the tracer options instead.
+type TracerConfig struct {
+ // InstrumentationVersion is the version of the instrumentation library.
+ InstrumentationVersion string
+}
+
+// NewTracerConfig applies all the options to a returned TracerConfig.
+// The default value for all fields of the returned TracerConfig is the
+// zero value of the field's type. Also, this does not perform any validation
+// on the returned TracerConfig (e.g. no uniqueness checking or bounding of
+// data); instead it is left to the implementations of the SDK to perform this
+// action.
+func NewTracerConfig(opts ...TracerOption) *TracerConfig {
+ config := new(TracerConfig)
+ for _, option := range opts {
+ option.Apply(config)
+ }
+ return config
+}
+
+// TracerOption applies an option to a TracerConfig.
+type TracerOption interface {
+ Apply(*TracerConfig)
+}
+
+type instVersionTracerOption string
+
+func (o instVersionTracerOption) Apply(c *TracerConfig) { c.InstrumentationVersion = string(o) }
+
+// WithInstrumentationVersion sets the instrumentation version for a Tracer.
+func WithInstrumentationVersion(version string) TracerOption {
+ return instVersionTracerOption(version)
+}
+
+type Tracer interface {
+ // Start a span.
+ Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span)
+}
+
+// ErrorConfig provides options to set properties of an error
+// event at the time it is recorded.
+//
+// Most users will use the error options instead.
+type ErrorConfig struct {
+ Timestamp time.Time
+ StatusCode codes.Code
+}
+
+// ErrorOption applies changes to ErrorConfig that sets options when an error event is recorded.
+type ErrorOption func(*ErrorConfig)
+
+// WithErrorTime sets the time at which the error event should be recorded.
+func WithErrorTime(t time.Time) ErrorOption {
+ return func(c *ErrorConfig) {
+ c.Timestamp = t
+ }
+}
+
+// WithErrorStatus indicates the span status that should be set when recording an error event.
+func WithErrorStatus(s codes.Code) ErrorOption {
+ return func(c *ErrorConfig) {
+ c.StatusCode = s
+ }
+}
+
+type Span interface {
+ // Tracer returns the tracer used to create this span. Tracer cannot be nil.
+ Tracer() Tracer
+
+ // End completes the span. No updates are allowed to the span after it
+ // ends. The only exception is setting the status of the span.
+ End(options ...SpanOption)
+
+ // AddEvent adds an event to the span.
+ AddEvent(ctx context.Context, name string, attrs ...label.KeyValue)
+ // AddEventWithTimestamp adds an event with a custom timestamp
+ // to the span.
+ AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue)
+
+ // IsRecording returns true if the span is active and recording events is enabled.
+ IsRecording() bool
+
+ // RecordError records an error as a span event.
+ RecordError(ctx context.Context, err error, opts ...ErrorOption)
+
+ // SpanContext returns the span context of the span. The returned
+ // SpanContext is usable even after the span ends.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the span in the form of a code
+ // and a message. SetStatus overrides the value of previous
+ // calls to SetStatus on the Span.
+ //
+ // The default span status is OK, so it is not necessary to
+ // explicitly set an OK status on successful Spans unless it
+ // is to add an OK message or to override a previous status on the Span.
+ SetStatus(code codes.Code, msg string)
+
+ // SetName sets the name of the span.
+ SetName(name string)
+
+ // SetAttributes sets attributes of the span.
+ SetAttributes(kv ...label.KeyValue)
+}
+
+// SpanConfig is a group of options for a Span.
+//
+// Most users will use span options instead.
+type SpanConfig struct {
+ // Attributes describe the associated qualities of a Span.
+ Attributes []label.KeyValue
+ // Timestamp is a time in a Span life-cycle.
+ Timestamp time.Time
+ // Links are the associations a Span has with other Spans.
+ Links []Link
+ // Record is the recording state of a Span.
+ Record bool
+ // NewRoot identifies a Span as the root Span for a new trace. This is
+ // commonly used when an existing trace crosses trust boundaries and the
+ // remote parent span context should be ignored for security.
+ NewRoot bool
+ // SpanKind is the role a Span has in a trace.
+ SpanKind SpanKind
+}
+
+// NewSpanConfig applies all the options to a returned SpanConfig.
+// The default value for all fields of the returned SpanConfig is the
+// zero value of the field's type. Also, this does not perform any validation
+// on the returned SpanConfig (e.g. no uniqueness checking or bounding of
+// data). Instead, it is left to the implementations of the SDK to perform this
+// action.
+func NewSpanConfig(opts ...SpanOption) *SpanConfig {
+ c := new(SpanConfig)
+ for _, option := range opts {
+ option.Apply(c)
+ }
+ return c
+}
+
+// SpanOption applies an option to a SpanConfig.
+type SpanOption interface {
+ Apply(*SpanConfig)
+}
+
+type attributeSpanOption []label.KeyValue
+
+func (o attributeSpanOption) Apply(c *SpanConfig) {
+ c.Attributes = append(c.Attributes, []label.KeyValue(o)...)
+}
+
+// WithAttributes adds the attributes to a span. These attributes are meant to
+// provide additional information about the work the Span represents. The
+// attributes are added to the existing Span attributes, i.e. this does not
+// overwrite.
+func WithAttributes(attributes ...label.KeyValue) SpanOption {
+ return attributeSpanOption(attributes)
+}
+
+type timestampSpanOption time.Time
+
+func (o timestampSpanOption) Apply(c *SpanConfig) { c.Timestamp = time.Time(o) }
+
+// WithTimestamp sets the time of a Span life-cycle moment (e.g. started or
+// stopped).
+func WithTimestamp(t time.Time) SpanOption {
+ return timestampSpanOption(t)
+}
+
+type linksSpanOption []Link
+
+func (o linksSpanOption) Apply(c *SpanConfig) { c.Links = append(c.Links, []Link(o)...) }
+
+// WithLinks adds links to a Span. The links are added to the existing Span
+// links, i.e. this does not overwrite.
+func WithLinks(links ...Link) SpanOption {
+ return linksSpanOption(links)
+}
+
+type recordSpanOption bool
+
+func (o recordSpanOption) Apply(c *SpanConfig) { c.Record = bool(o) }
+
+// WithRecord specifies that the span should be recorded. It is important to
+// note that implementations may override this option, e.g. if the span is a
+// child of an unsampled trace.
+func WithRecord() SpanOption {
+ return recordSpanOption(true)
+}
+
+type newRootSpanOption bool
+
+func (o newRootSpanOption) Apply(c *SpanConfig) { c.NewRoot = bool(o) }
+
+// WithNewRoot specifies that the Span should be treated as a root Span. Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanOption {
+ return newRootSpanOption(true)
+}
+
+type spanKindSpanOption SpanKind
+
+func (o spanKindSpanOption) Apply(c *SpanConfig) { c.SpanKind = SpanKind(o) }
+
+// WithSpanKind sets the SpanKind of a Span.
+func WithSpanKind(kind SpanKind) SpanOption {
+ return spanKindSpanOption(kind)
+}
+
+// Link is used to establish a relationship between two spans within the same Trace or
+// across different Traces. A few examples of Link usage:
+// 1. Batch Processing: A batch of elements may contain elements associated with one
+// or more traces/spans. Since there can only be one parent SpanContext, Link is
+// used to keep a reference to the SpanContext of all elements in the batch.
+// 2. Public Endpoint: A SpanContext in an incoming client request on a public endpoint
+// is untrusted from the service provider's perspective. In such a case it is advisable to
+// start a new trace with an appropriate sampling decision.
+// However, it is desirable to associate the incoming SpanContext with the new trace initiated
+// on the service provider's side so the two traces (from the Client and from the Service Provider)
+// can be correlated.
+type Link struct {
+ SpanContext
+ Attributes []label.KeyValue
+}
+
+// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span
+// will be processed and visualized by various backends.
+type SpanKind int
+
+const (
+ // As a convenience, these match the proto definition, see
+ // opentelemetry/proto/trace/v1/trace.proto
+ //
+ // The unspecified value is not a valid `SpanKind`. Use
+ // `ValidateSpanKind()` to coerce a span kind to a valid
+ // value.
+ SpanKindUnspecified SpanKind = 0
+ SpanKindInternal SpanKind = 1
+ SpanKindServer SpanKind = 2
+ SpanKindClient SpanKind = 3
+ SpanKindProducer SpanKind = 4
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/context.go b/vendor/go.opentelemetry.io/otel/api/trace/context.go
new file mode 100644
index 0000000..0f330e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/context.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+ "context"
+)
+
+type traceContextKeyType int
+
+const (
+ currentSpanKey traceContextKeyType = iota
+ remoteContextKey
+)
+
+// ContextWithSpan creates a new context with a current span set to
+// the passed span.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+ return context.WithValue(ctx, currentSpanKey, span)
+}
+
+// SpanFromContext returns the current span stored in the context.
+func SpanFromContext(ctx context.Context) Span {
+ if span, has := ctx.Value(currentSpanKey).(Span); has {
+ return span
+ }
+ return noopSpan{}
+}
+
+// ContextWithRemoteSpanContext creates a new context with a remote
+// span context set to the passed span context.
+func ContextWithRemoteSpanContext(ctx context.Context, sc SpanContext) context.Context {
+ return context.WithValue(ctx, remoteContextKey, sc)
+}
+
+// RemoteSpanContextFromContext returns the remote span context stored
+// in the context.
+func RemoteSpanContextFromContext(ctx context.Context) SpanContext {
+ if sc, ok := ctx.Value(remoteContextKey).(SpanContext); ok {
+ return sc
+ }
+ return EmptySpanContext()
+}
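As a quick reference, a sketch of how the two pairs of context helpers above are used (illustrative only, not taken from the patch):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/trace"
)

// withCurrentSpan stores a span as the current span and reads it back;
// when nothing has been stored, SpanFromContext returns a noop span, so
// callers never need a nil check.
func withCurrentSpan(ctx context.Context, span trace.Span) trace.Span {
	ctx = trace.ContextWithSpan(ctx, span)
	return trace.SpanFromContext(ctx)
}

// propagateRemote stores an extracted remote span context and retrieves
// it again; EmptySpanContext is returned when none is present.
func propagateRemote(ctx context.Context, sc trace.SpanContext) trace.SpanContext {
	ctx = trace.ContextWithRemoteSpanContext(ctx, sc)
	return trace.RemoteSpanContextFromContext(ctx)
}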
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/doc.go b/vendor/go.opentelemetry.io/otel/api/trace/doc.go
new file mode 100644
index 0000000..24f2dfb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package trace provides tracing support.
+package trace // import "go.opentelemetry.io/otel/api/trace"
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go
new file mode 100644
index 0000000..f014f21
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/label"
+)
+
+type noopSpan struct {
+}
+
+var _ Span = noopSpan{}
+
+// SpanContext returns an invalid span context.
+func (noopSpan) SpanContext() SpanContext {
+ return EmptySpanContext()
+}
+
+// IsRecording always returns false for NoopSpan.
+func (noopSpan) IsRecording() bool {
+ return false
+}
+
+// SetStatus does nothing.
+func (noopSpan) SetStatus(status codes.Code, msg string) {
+}
+
+// SetError does nothing.
+func (noopSpan) SetError(v bool) {
+}
+
+// SetAttributes does nothing.
+func (noopSpan) SetAttributes(attributes ...label.KeyValue) {
+}
+
+// End does nothing.
+func (noopSpan) End(options ...SpanOption) {
+}
+
+// RecordError does nothing.
+func (noopSpan) RecordError(ctx context.Context, err error, opts ...ErrorOption) {
+}
+
+// Tracer returns a noop implementation of Tracer.
+func (noopSpan) Tracer() Tracer {
+ return noopTracer{}
+}
+
+// AddEvent does nothing.
+func (noopSpan) AddEvent(ctx context.Context, name string, attrs ...label.KeyValue) {
+}
+
+// AddEventWithTimestamp does nothing.
+func (noopSpan) AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue) {
+}
+
+// SetName does nothing.
+func (noopSpan) SetName(name string) {
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go
new file mode 100644
index 0000000..954f9e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+ "context"
+)
+
+type noopTracer struct{}
+
+var _ Tracer = noopTracer{}
+
+// Start starts a noop span.
+func (noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+ span := noopSpan{}
+ return ContextWithSpan(ctx, span), span
+}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go
new file mode 100644
index 0000000..414c8e3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+type noopTracerProvider struct{}
+
+var _ TracerProvider = noopTracerProvider{}
+
+// Tracer returns a noop implementation of Tracer.
+func (p noopTracerProvider) Tracer(_ string, _ ...TracerOption) Tracer {
+ return noopTracer{}
+}
+
+// NoopTracerProvider returns a noop implementation of TracerProvider. The
+// Tracer and Spans created from the noop provider will also be noop.
+func NoopTracerProvider() TracerProvider {
+ return noopTracerProvider{}
+}
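A minimal sketch tying the noop provider together with the Tracer and Span interfaces defined earlier in this patch (illustrative; the instrumentation name, span name, and event name are made up):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/trace"
)

// traceNoop starts a span from the noop provider. Every call here is a
// no-op, but the call pattern matches an SDK-backed provider.
func traceNoop(ctx context.Context) {
	tracer := trace.NoopTracerProvider().Tracer("example/instrumentation")
	ctx, span := tracer.Start(ctx, "do-work", trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	span.AddEvent(ctx, "work-started")
}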
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/span_context.go b/vendor/go.opentelemetry.io/otel/api/trace/span_context.go
new file mode 100644
index 0000000..914ce5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/api/trace/span_context.go
@@ -0,0 +1,197 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+)
+
+const (
+ // FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+ // with the sampling bit set means the span is sampled.
+ FlagsSampled = byte(0x01)
+ // FlagsDeferred is a bitmask with the deferred bit set. A SpanContext
+ // with the deferred bit set means the sampling decision has been
+ // deferred to the receiver.
+ FlagsDeferred = byte(0x02)
+ // FlagsDebug is a bitmask with the debug bit set.
+ FlagsDebug = byte(0x04)
+
+ ErrInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+ ErrInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equal to 32"
+ ErrNilTraceID errorConst = "trace-id can't be all zero"
+
+ ErrInvalidSpanIDLength errorConst = "hex encoded span-id must have length equal to 16"
+ ErrNilSpanID errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+ return string(e)
+}
+
+// ID is a unique identity of a trace.
+type ID [16]byte
+
+var nilTraceID ID
+var _ json.Marshaler = nilTraceID
+
+// IsValid checks whether the trace ID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t ID) IsValid() bool {
+ return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t ID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String returns the hex string representation of a TraceID.
+func (t ID) String() string {
+ return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identifier of a span in a trace.
+type SpanID [8]byte
+
+var nilSpanID SpanID
+var _ json.Marshaler = nilSpanID
+
+// IsValid checks whether the span ID is valid. A valid span ID does
+// not consist of zeros only.
+func (s SpanID) IsValid() bool {
+ return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// String returns the hex string representation of a SpanID.
+func (s SpanID) String() string {
+ return hex.EncodeToString(s[:])
+}
+
+// IDFromHex returns a TraceID from a hex string if it is compliant
+// with the w3c trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#trace-id
+func IDFromHex(h string) (ID, error) {
+ t := ID{}
+ if len(h) != 32 {
+ return t, ErrInvalidTraceIDLength
+ }
+
+ if err := decodeHex(h, t[:]); err != nil {
+ return t, err
+ }
+
+ if !t.IsValid() {
+ return t, ErrNilTraceID
+ }
+ return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the w3c trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+ s := SpanID{}
+ if len(h) != 16 {
+ return s, ErrInvalidSpanIDLength
+ }
+
+ if err := decodeHex(h, s[:]); err != nil {
+ return s, err
+ }
+
+ if !s.IsValid() {
+ return s, ErrNilSpanID
+ }
+ return s, nil
+}
+
+func decodeHex(h string, b []byte) error {
+ for _, r := range h {
+ switch {
+ case 'a' <= r && r <= 'f':
+ continue
+ case '0' <= r && r <= '9':
+ continue
+ default:
+ return ErrInvalidHexID
+ }
+ }
+
+ decoded, err := hex.DecodeString(h)
+ if err != nil {
+ return err
+ }
+
+ copy(b, decoded)
+ return nil
+}
+
+// SpanContext contains basic information about the span - its trace
+// ID, span ID and trace flags.
+type SpanContext struct {
+ TraceID ID
+ SpanID SpanID
+ TraceFlags byte
+}
+
+// EmptySpanContext is meant for internal use to return an invalid span
+// context during error conditions.
+func EmptySpanContext() SpanContext {
+ return SpanContext{}
+}
+
+// IsValid checks if the span context is valid. A valid span context
+// has a valid trace ID and a valid span ID.
+func (sc SpanContext) IsValid() bool {
+ return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// HasTraceID checks if the span context has a valid trace ID.
+func (sc SpanContext) HasTraceID() bool {
+ return sc.TraceID.IsValid()
+}
+
+// HasSpanID checks if the span context has a valid span ID.
+func (sc SpanContext) HasSpanID() bool {
+ return sc.SpanID.IsValid()
+}
+
+// IsDeferred returns whether the deferred bit is set in the trace flags.
+func (sc SpanContext) IsDeferred() bool {
+ return sc.TraceFlags&FlagsDeferred == FlagsDeferred
+}
+
+// IsDebug returns whether the debug bit is set in the trace flags.
+func (sc SpanContext) IsDebug() bool {
+ return sc.TraceFlags&FlagsDebug == FlagsDebug
+}
+
+// IsSampled returns whether the sampling bit is set in the trace flags.
+func (sc SpanContext) IsSampled() bool {
+ return sc.TraceFlags&FlagsSampled == FlagsSampled
+}
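For reference, a small sketch of building and inspecting a SpanContext with the helpers above (the hex identifiers are arbitrary, spec-shaped examples):

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/api/trace"
)

// parseSpanContext builds a SpanContext from w3c-style hex identifiers
// and reports its validity and sampling flag.
func parseSpanContext() error {
	traceID, err := trace.IDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	if err != nil {
		return err
	}
	spanID, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	if err != nil {
		return err
	}
	sc := trace.SpanContext{TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled}
	fmt.Println(sc.IsValid(), sc.IsSampled()) // true true
	return nil
}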
diff --git a/vendor/go.opentelemetry.io/otel/baggage.go b/vendor/go.opentelemetry.io/otel/baggage.go
new file mode 100644
index 0000000..bf0f1f6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/internal/baggage"
+ "go.opentelemetry.io/otel/label"
+)
+
+// Baggage returns a copy of the baggage in ctx.
+func Baggage(ctx context.Context) label.Set {
+ // TODO (MrAlias, #1222): The underlying storage, the Map, shares many of
+ // the functional elements of the label.Set. These should be unified so
+ // this conversion is unnecessary and there is no performance hit calling
+ // this.
+ m := baggage.MapFromContext(ctx)
+ values := make([]label.KeyValue, 0, m.Len())
+ m.Foreach(func(kv label.KeyValue) bool {
+ values = append(values, kv)
+ return true
+ })
+ return label.NewSet(values...)
+}
+
+// BaggageValue returns the value related to key in the baggage of ctx. If no
+// value is set, the returned label.Value will be an uninitialized zero-value
+// with type INVALID.
+func BaggageValue(ctx context.Context, key label.Key) label.Value {
+ v, _ := baggage.MapFromContext(ctx).Value(key)
+ return v
+}
+
+// ContextWithBaggageValues returns a copy of parent with pairs updated in the baggage.
+func ContextWithBaggageValues(parent context.Context, pairs ...label.KeyValue) context.Context {
+ m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
+ MultiKV: pairs,
+ })
+ return baggage.ContextWithMap(parent, m)
+}
+
+// ContextWithoutBaggageValues returns a copy of parent in which the values related
+// to keys have been removed from the baggage.
+func ContextWithoutBaggageValues(parent context.Context, keys ...label.Key) context.Context {
+ m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
+ DropMultiK: keys,
+ })
+ return baggage.ContextWithMap(parent, m)
+}
+
+// ContextWithoutBaggage returns a copy of parent without baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+ return baggage.ContextWithNoCorrelationData(parent)
+}
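A usage sketch of the baggage helpers above (illustrative; label.String is assumed to exist in the vendored label package):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/label"
)

// tagRequest stores a key-value pair in the context baggage and reads
// it back further down the call chain.
func tagRequest(ctx context.Context) label.Value {
	ctx = otel.ContextWithBaggageValues(ctx, label.String("tenant", "acme"))
	return otel.BaggageValue(ctx, label.Key("tenant"))
}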
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 0000000..28393a5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package codes defines the canonical error codes used by OpenTelemetry.
+//
+// It conforms to [the OpenTelemetry
+// specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#statuscanonicalcode).
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+ "fmt"
+ "strconv"
+)
+
+const (
+ // Unset is the default status code.
+ Unset Code = 0
+ // Error indicates the operation contains an error.
+ Error Code = 1
+ // Ok indicates the operation has been validated by an application developer
+ // or operator to have completed successfully, or to contain no error.
+ Ok Code = 2
+
+ maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+ Unset: "Unset",
+ Error: "Error",
+ Ok: "Ok",
+}
+
+var strToCode = map[string]Code{
+ "Unset": Unset,
+ "Error": Error,
+ "Ok": Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+ return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+ // From json.Unmarshaler: By convention, to approximate the behavior of
+ // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+ // a no-op.
+ if string(b) == "null" {
+ return nil
+ }
+ if c == nil {
+ return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+ }
+
+ if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+ if ci >= maxCode {
+ return fmt.Errorf("invalid code: %q", ci)
+ }
+
+ *c = Code(ci)
+ return nil
+ }
+
+ if jc, ok := strToCode[string(b)]; ok {
+ *c = jc
+ return nil
+ }
+ return fmt.Errorf("invalid code: %q", string(b))
+}
+
+// MarshalJSON returns the JSON encoding of c.
+func (c *Code) MarshalJSON() ([]byte, error) {
+ if c == nil {
+ return []byte("null"), nil
+ }
+ str, ok := codeToStr[*c]
+ if !ok {
+ return nil, fmt.Errorf("invalid code: %d", *c)
+ }
+ return []byte(fmt.Sprintf("%q", str)), nil
+}
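A short sketch of the JSON behavior implemented above (illustrative only):

package example

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

// codeJSON marshals a status code to its string form and decodes the
// numeric wire form back into a Code.
func codeJSON() error {
	c := codes.Error
	b, err := json.Marshal(&c)
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // "Error"

	var decoded codes.Code
	if err := json.Unmarshal([]byte("1"), &decoded); err != nil {
		return err
	}
	fmt.Println(decoded) // Error
	return nil
}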
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
new file mode 100644
index 0000000..de2f76c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otel contains OpenTelemetry Go packages.
+package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
new file mode 100644
index 0000000..d1c892f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+top_dir='.'
+if [[ $# -gt 0 ]]; then
+ top_dir="${1}"
+fi
+
+p=$(pwd)
+mod_dirs=()
+mapfile -t mod_dirs < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+for mod_dir in "${mod_dirs[@]}"; do
+ cd "${mod_dir}"
+ main_dirs=()
+ mapfile -t main_dirs < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
+ for main_dir in "${main_dirs[@]}"; do
+ echo ".${main_dir#${p}}"
+ done
+ cd "${p}"
+done
diff --git a/vendor/go.opentelemetry.io/otel/go.mod b/vendor/go.opentelemetry.io/otel/go.mod
new file mode 100644
index 0000000..95f9aa4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/go.mod
@@ -0,0 +1,8 @@
+module go.opentelemetry.io/otel
+
+go 1.14
+
+require (
+ github.com/google/go-cmp v0.5.2
+ github.com/stretchr/testify v1.6.1
+)
diff --git a/vendor/go.opentelemetry.io/otel/go.sum b/vendor/go.opentelemetry.io/otel/go.sum
new file mode 100644
index 0000000..bf1148d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/go.sum
@@ -0,0 +1,15 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 0000000..6b5c0c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,338 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package baggage provides types and functions to manage W3C Baggage.
+package baggage
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/label"
+)
+
+type rawMap map[label.Key]label.Value
+type keySet map[label.Key]struct{}
+
+// Map is an immutable store for correlations.
+type Map struct {
+ m rawMap
+}
+
+// MapUpdate contains information about correlation changes to be
+// made.
+type MapUpdate struct {
+ // DropSingleK contains a single key to be dropped from
+ // correlations. Use this to avoid the overhead of a slice
+ // allocation if there is only one key to drop.
+ DropSingleK label.Key
+ // DropMultiK contains all the keys to be dropped from
+ // correlations.
+ DropMultiK []label.Key
+
+ // SingleKV contains a single key-value pair to be added to
+ // correlations. Use this to avoid the overhead of a slice
+ // allocation if there is only one key-value pair to add.
+ SingleKV label.KeyValue
+ // MultiKV contains all the key-value pairs to be added to
+ // correlations.
+ MultiKV []label.KeyValue
+}
+
+func newMap(raw rawMap) Map {
+ return Map{
+ m: raw,
+ }
+}
+
+// NewEmptyMap creates an empty correlations map.
+func NewEmptyMap() Map {
+ return newMap(nil)
+}
+
+// NewMap creates a map with the contents of the update applied. In
+// this function, having an update with DropSingleK or DropMultiK
+// makes no sense - those fields are effectively ignored.
+func NewMap(update MapUpdate) Map {
+ return NewEmptyMap().Apply(update)
+}
+
+// Apply creates a copy of the map with the contents of the update
+// applied. Apply will first drop the keys from DropSingleK and
+// DropMultiK, then add key-value pairs from SingleKV and MultiKV.
+func (m Map) Apply(update MapUpdate) Map {
+ delSet, addSet := getModificationSets(update)
+ mapSize := getNewMapSize(m.m, delSet, addSet)
+
+ r := make(rawMap, mapSize)
+ for k, v := range m.m {
+ // do not copy items we want to drop
+ if _, ok := delSet[k]; ok {
+ continue
+ }
+ // do not copy items we would overwrite
+ if _, ok := addSet[k]; ok {
+ continue
+ }
+ r[k] = v
+ }
+ if update.SingleKV.Key.Defined() {
+ r[update.SingleKV.Key] = update.SingleKV.Value
+ }
+ for _, kv := range update.MultiKV {
+ r[kv.Key] = kv.Value
+ }
+ if len(r) == 0 {
+ r = nil
+ }
+ return newMap(r)
+}
+
+func getModificationSets(update MapUpdate) (delSet, addSet keySet) {
+ deletionsCount := len(update.DropMultiK)
+ if update.DropSingleK.Defined() {
+ deletionsCount++
+ }
+ if deletionsCount > 0 {
+ delSet = make(map[label.Key]struct{}, deletionsCount)
+ for _, k := range update.DropMultiK {
+ delSet[k] = struct{}{}
+ }
+ if update.DropSingleK.Defined() {
+ delSet[update.DropSingleK] = struct{}{}
+ }
+ }
+
+ additionsCount := len(update.MultiKV)
+ if update.SingleKV.Key.Defined() {
+ additionsCount++
+ }
+ if additionsCount > 0 {
+ addSet = make(map[label.Key]struct{}, additionsCount)
+ for _, k := range update.MultiKV {
+ addSet[k.Key] = struct{}{}
+ }
+ if update.SingleKV.Key.Defined() {
+ addSet[update.SingleKV.Key] = struct{}{}
+ }
+ }
+
+ return
+}
+
+func getNewMapSize(m rawMap, delSet, addSet keySet) int {
+ mapSizeDiff := 0
+ for k := range addSet {
+ if _, ok := m[k]; !ok {
+ mapSizeDiff++
+ }
+ }
+ for k := range delSet {
+ if _, ok := m[k]; ok {
+ if _, inAddSet := addSet[k]; !inAddSet {
+ mapSizeDiff--
+ }
+ }
+ }
+ return len(m) + mapSizeDiff
+}
+
+// Value gets a value from the correlations map and returns a boolean
+// value indicating whether the key exists in the map.
+func (m Map) Value(k label.Key) (label.Value, bool) {
+ value, ok := m.m[k]
+ return value, ok
+}
+
+// HasValue returns a boolean value indicating whether the key exists
+// in the map.
+func (m Map) HasValue(k label.Key) bool {
+ _, has := m.Value(k)
+ return has
+}
+
+// Len returns the length of the map.
+func (m Map) Len() int {
+ return len(m.m)
+}
+
+// Foreach calls the passed callback once on each key-value pair until
+// all the key-value pairs of the map have been iterated or the callback
+// returns false, whichever happens first.
+func (m Map) Foreach(f func(label.KeyValue) bool) {
+ for k, v := range m.m {
+ if !f(label.KeyValue{
+ Key: k,
+ Value: v,
+ }) {
+ return
+ }
+ }
+}
+
+type correlationsType struct{}
+
+// SetHookFunc describes a type of a callback that is called when
+// storing baggage in the context.
+type SetHookFunc func(context.Context) context.Context
+
+// GetHookFunc describes a type of a callback that is called when
+// getting baggage from the context.
+type GetHookFunc func(context.Context, Map) Map
+
+// value under this key is either of type Map or correlationsData
+var correlationsKey = &correlationsType{}
+
+type correlationsData struct {
+ m Map
+ setHook SetHookFunc
+ getHook GetHookFunc
+}
+
+func (d correlationsData) isHookless() bool {
+ return d.setHook == nil && d.getHook == nil
+}
+
+type hookKind int
+
+const (
+ hookKindSet hookKind = iota
+ hookKindGet
+)
+
+func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) {
+ switch kind {
+ case hookKindSet:
+ d.setHook = setHook
+ case hookKindGet:
+ d.getHook = getHook
+ }
+}
+
+// ContextWithSetHook installs a hook function that will be invoked
+// every time ContextWithMap is called. To avoid unnecessary callback
+// invocations (recursive or not), the callback can temporarily clear
+// the hooks from the context with the ContextWithNoHooks function.
+//
+// Note that NewContext also calls ContextWithMap, so the hook will be
+// invoked.
+//
+// Passing nil SetHookFunc creates a context with no set hook to call.
+//
+// This function should not be used by applications or libraries. It
+// is mostly for interoperation with other observability APIs.
+func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context {
+ return contextWithHook(ctx, hookKindSet, hook, nil)
+}
+
+// ContextWithGetHook installs a hook function that will be invoked
+// every time MapFromContext is called. To avoid unnecessary callback
+// invocations (recursive or not), the callback can temporarily clear
+// the hooks from the context with the ContextWithNoHooks function.
+//
+// Note that NewContext also calls MapFromContext, so the hook will be
+// invoked.
+//
+// Passing nil GetHookFunc creates a context with no get hook to call.
+//
+// This function should not be used by applications or libraries. It
+// is mostly for interoperation with other observability APIs.
+func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context {
+ return contextWithHook(ctx, hookKindGet, nil, hook)
+}
+
+func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context {
+ switch v := ctx.Value(correlationsKey).(type) {
+ case correlationsData:
+ v.overrideHook(kind, setHook, getHook)
+ if v.isHookless() {
+ return context.WithValue(ctx, correlationsKey, v.m)
+ }
+ return context.WithValue(ctx, correlationsKey, v)
+ case Map:
+ return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v)
+ default:
+ m := NewEmptyMap()
+ return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m)
+ }
+}
+
+func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context {
+ d := correlationsData{m: m}
+ d.overrideHook(kind, setHook, getHook)
+ if d.isHookless() {
+ return ctx
+ }
+ return context.WithValue(ctx, correlationsKey, d)
+}
+
+// ContextWithNoHooks creates a context with all the hooks
+// disabled. Also returns old set and get hooks. This function can be
+// used to temporarily clear the context from hooks and then reinstate
+// them by calling ContextWithSetHook and ContextWithGetHook functions
+// passing the hooks returned by this function.
+//
+// This function should not be used by applications or libraries. It
+// is mostly for interoperation with other observability APIs.
+func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) {
+ switch v := ctx.Value(correlationsKey).(type) {
+ case correlationsData:
+ return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook
+ default:
+ return ctx, nil, nil
+ }
+}
+
+// ContextWithMap returns a context with the Map entered into it.
+func ContextWithMap(ctx context.Context, m Map) context.Context {
+ switch v := ctx.Value(correlationsKey).(type) {
+ case correlationsData:
+ v.m = m
+ ctx = context.WithValue(ctx, correlationsKey, v)
+ if v.setHook != nil {
+ ctx = v.setHook(ctx)
+ }
+ return ctx
+ default:
+ return context.WithValue(ctx, correlationsKey, m)
+ }
+}
+
+// ContextWithNoCorrelationData returns a context stripped of correlation
+// data.
+func ContextWithNoCorrelationData(ctx context.Context) context.Context {
+ return context.WithValue(ctx, correlationsKey, nil)
+}
+
+// NewContext returns a context with the map from passed context
+// updated with the passed key-value pairs.
+func NewContext(ctx context.Context, keyvalues ...label.KeyValue) context.Context {
+ return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{
+ MultiKV: keyvalues,
+ }))
+}
+
+// MapFromContext gets the current Map from a Context.
+func MapFromContext(ctx context.Context) Map {
+ switch v := ctx.Value(correlationsKey).(type) {
+ case correlationsData:
+ if v.getHook != nil {
+ return v.getHook(ctx, v.m)
+ }
+ return v.m
+ case Map:
+ return v
+ default:
+ return NewEmptyMap()
+ }
+}
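A sketch of how the Map and MapUpdate machinery above composes (illustrative; this is an internal package, so the code below would have to live inside the otel module itself, and label.String is assumed):

package example

import (
	"context"

	"go.opentelemetry.io/otel/internal/baggage"
	"go.opentelemetry.io/otel/label"
)

// updateBaggage adds two pairs, drops one of them, and stores the result
// in the context so MapFromContext can retrieve it later.
func updateBaggage(ctx context.Context) baggage.Map {
	m := baggage.NewEmptyMap().Apply(baggage.MapUpdate{
		MultiKV: []label.KeyValue{
			label.String("user", "alice"),
			label.String("tmp", "x"),
		},
	})
	m = m.Apply(baggage.MapUpdate{DropSingleK: label.Key("tmp")})
	ctx = baggage.ContextWithMap(ctx, m)
	return baggage.MapFromContext(ctx)
}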
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
new file mode 100644
index 0000000..dae825e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "math"
+ "unsafe"
+)
+
+func BoolToRaw(b bool) uint64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func RawToBool(r uint64) bool {
+ return r != 0
+}
+
+func Int64ToRaw(i int64) uint64 {
+ return uint64(i)
+}
+
+func RawToInt64(r uint64) int64 {
+ return int64(r)
+}
+
+func Uint64ToRaw(u uint64) uint64 {
+ return u
+}
+
+func RawToUint64(r uint64) uint64 {
+ return r
+}
+
+func Float64ToRaw(f float64) uint64 {
+ return math.Float64bits(f)
+}
+
+func RawToFloat64(r uint64) float64 {
+ return math.Float64frombits(r)
+}
+
+func Int32ToRaw(i int32) uint64 {
+ return uint64(i)
+}
+
+func RawToInt32(r uint64) int32 {
+ return int32(r)
+}
+
+func Uint32ToRaw(u uint32) uint64 {
+ return uint64(u)
+}
+
+func RawToUint32(r uint64) uint32 {
+ return uint32(r)
+}
+
+func Float32ToRaw(f float32) uint64 {
+ return Uint32ToRaw(math.Float32bits(f))
+}
+
+func RawToFloat32(r uint64) float32 {
+ return math.Float32frombits(RawToUint32(r))
+}
+
+func RawPtrToFloat64Ptr(r *uint64) *float64 {
+ return (*float64)(unsafe.Pointer(r))
+}
+
+func RawPtrToInt64Ptr(r *uint64) *int64 {
+ return (*int64)(unsafe.Pointer(r))
+}
+
+func RawPtrToUint64Ptr(r *uint64) *uint64 {
+ return r
+}
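
The helpers above only reinterpret bit patterns; the label Value type added later in this patch stores every numeric kind in a single uint64 field through them. A standalone sketch of the same float64 round trip using only the standard library (the internal package cannot be imported from outside the otel module):

package main

import (
	"fmt"
	"math"
)

func main() {
	// Float64ToRaw / RawToFloat64 are thin wrappers around these two
	// calls: the float's bit pattern is stored in a uint64 and
	// recovered loss-free.
	f := 3.14159
	raw := math.Float64bits(f)        // what Float64ToRaw does
	back := math.Float64frombits(raw) // what RawToFloat64 does
	fmt.Println(raw, back == f)       // prints the raw bits and "true"
}
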
diff --git a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go
new file mode 100644
index 0000000..c09a737
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go
@@ -0,0 +1,35 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides noop tracing implementations for tracer and span.
+package noop
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+var (
+ // Tracer is a noop tracer that starts noop spans.
+ Tracer trace.Tracer
+
+ // Span is a noop Span.
+ Span trace.Span
+)
+
+func init() {
+ Tracer = trace.NoopTracerProvider().Tracer("")
+ _, Span = Tracer.Start(context.Background(), "")
+}
diff --git a/vendor/go.opentelemetry.io/otel/label/doc.go b/vendor/go.opentelemetry.io/otel/label/doc.go
new file mode 100644
index 0000000..d631d23
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package label provides key and value labels.
+package label // import "go.opentelemetry.io/otel/label"
diff --git a/vendor/go.opentelemetry.io/otel/label/encoder.go b/vendor/go.opentelemetry.io/otel/label/encoder.go
new file mode 100644
index 0000000..6be7a3f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/encoder.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+import (
+ "bytes"
+ "sync"
+ "sync/atomic"
+)
+
+type (
+ // Encoder is a mechanism for serializing a label set into a
+ // specific string representation that supports caching, to
+ // avoid repeated serialization. An example could be an
+ // exporter encoding the label set into a wire representation.
+ Encoder interface {
+ // Encode returns the serialized encoding of the label
+ // set using its Iterator. This result may be cached
+ // by a label.Set.
+ Encode(iterator Iterator) string
+
+ // ID returns a value that is unique for each class of
+ // label encoder. Label encoders allocate these using
+ // `NewEncoderID`.
+ ID() EncoderID
+ }
+
+ // EncoderID is used to identify distinct Encoder
+ // implementations, for caching encoded results.
+ EncoderID struct {
+ value uint64
+ }
+
+ // defaultLabelEncoder uses a sync.Pool of buffers to reduce
+ // the number of allocations used in encoding labels. This
+ // implementation encodes a comma-separated list of key=value,
+ // with '\'-escaping of '=', ',', and '\'.
+ defaultLabelEncoder struct {
+ // pool is a pool of labelset builders. The buffers in this
+ // pool grow large enough that most label encodings do not
+ // require new allocations.
+ pool sync.Pool // *bytes.Buffer
+ }
+)
+
+// escapeChar is used to ensure uniqueness of the label encoding where
+// keys or values contain either '=' or ','. Since there is no parser
+// needed for this encoding and its only requirement is to be unique,
+// this choice is arbitrary. Users will see these in some exporters
+// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+ _ Encoder = &defaultLabelEncoder{}
+
+ // encoderIDCounter is for generating IDs for other label
+ // encoders.
+ encoderIDCounter uint64
+
+ defaultEncoderOnce sync.Once
+ defaultEncoderID = NewEncoderID()
+ defaultEncoderInstance *defaultLabelEncoder
+)
+
+// NewEncoderID returns a unique label encoder ID. It should be
+// called once per each type of label encoder. Preferably in init() or
+// in var definition.
+func NewEncoderID() EncoderID {
+ return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns a label encoder that encodes labels
+// in such a way that each escaped label's key is followed by an equal
+// sign and then by an escaped label's value. All key-value pairs are
+// separated by a comma.
+//
+// Escaping is done by prepending a backslash before either a
+// backslash, equal sign or a comma.
+func DefaultEncoder() Encoder {
+ defaultEncoderOnce.Do(func() {
+ defaultEncoderInstance = &defaultLabelEncoder{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+ },
+ }
+ })
+ return defaultEncoderInstance
+}
+
+// Encode is part of the implementation of the Encoder interface.
+func (d *defaultLabelEncoder) Encode(iter Iterator) string {
+ buf := d.pool.Get().(*bytes.Buffer)
+ defer d.pool.Put(buf)
+ buf.Reset()
+
+ for iter.Next() {
+ i, keyValue := iter.IndexedLabel()
+ if i > 0 {
+ _, _ = buf.WriteRune(',')
+ }
+ copyAndEscape(buf, string(keyValue.Key))
+
+ _, _ = buf.WriteRune('=')
+
+ if keyValue.Value.Type() == STRING {
+ copyAndEscape(buf, keyValue.Value.AsString())
+ } else {
+ _, _ = buf.WriteString(keyValue.Value.Emit())
+ }
+ }
+ return buf.String()
+}
+
+// ID is part of the implementation of the Encoder interface.
+func (*defaultLabelEncoder) ID() EncoderID {
+ return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+ for _, ch := range val {
+ switch ch {
+ case '=', ',', escapeChar:
+ buf.WriteRune(escapeChar)
+ }
+ buf.WriteRune(ch)
+ }
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+ return id.value != 0
+}
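
Illustrative sketch (not part of the vendored file): exercising the default encoder from application code, assuming the package is imported as go.opentelemetry.io/otel/label. The expected output follows from the escaping rules above.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/label"
)

func main() {
	// The value of "query" contains '=' and ',', which the default
	// encoder escapes with a backslash.
	set := label.NewSet(
		label.String("service", "rw-core"),
		label.String("query", "a=1,b=2"),
	)
	// Encoded caches the result inside the Set, keyed by EncoderID.
	fmt.Println(set.Encoded(label.DefaultEncoder()))
	// Expected: query=a\=1\,b\=2,service=rw-core (keys sorted).
}
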
diff --git a/vendor/go.opentelemetry.io/otel/label/iterator.go b/vendor/go.opentelemetry.io/otel/label/iterator.go
new file mode 100644
index 0000000..9e72239
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/iterator.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+// Iterator allows iterating over the set of labels in order,
+// sorted by key.
+type Iterator struct {
+ storage *Set
+ idx int
+}
+
+// MergeIterator supports iterating over two sets of labels while
+// eliminating duplicate values from the combined set. The first
+// iterator value takes precedence.
+type MergeItererator struct {
+ one oneIterator
+ two oneIterator
+ current KeyValue
+}
+
+type oneIterator struct {
+ iter Iterator
+ done bool
+ label KeyValue
+}
+
+// Next moves the iterator to the next position. Returns false if there
+// are no more labels.
+func (i *Iterator) Next() bool {
+ i.idx++
+ return i.idx < i.Len()
+}
+
+// Label returns current KeyValue. Must be called only after Next returns
+// true.
+func (i *Iterator) Label() KeyValue {
+ kv, _ := i.storage.Get(i.idx)
+ return kv
+}
+
+// Attribute is a synonym for Label().
+func (i *Iterator) Attribute() KeyValue {
+ return i.Label()
+}
+
+// IndexedLabel returns current index and label. Must be called only
+// after Next returns true.
+func (i *Iterator) IndexedLabel() (int, KeyValue) {
+ return i.idx, i.Label()
+}
+
+// Len returns the number of labels in the iterator's `*Set`.
+func (i *Iterator) Len() int {
+ return i.storage.Len()
+}
+
+// ToSlice is a convenience function that creates a slice of labels
+// from the passed iterator. The iterator is set up to start from the
+// beginning before creating the slice.
+func (i *Iterator) ToSlice() []KeyValue {
+ l := i.Len()
+ if l == 0 {
+ return nil
+ }
+ i.idx = -1
+ slice := make([]KeyValue, 0, l)
+ for i.Next() {
+ slice = append(slice, i.Label())
+ }
+ return slice
+}
+
+// NewMergeIterator returns a MergeIterator for merging two label sets.
+// Duplicates are resolved by taking the value from the first set.
+func NewMergeIterator(s1, s2 *Set) MergeItererator {
+ mi := MergeItererator{
+ one: makeOne(s1.Iter()),
+ two: makeOne(s2.Iter()),
+ }
+ return mi
+}
+
+func makeOne(iter Iterator) oneIterator {
+ oi := oneIterator{
+ iter: iter,
+ }
+ oi.advance()
+ return oi
+}
+
+func (oi *oneIterator) advance() {
+ if oi.done = !oi.iter.Next(); !oi.done {
+ oi.label = oi.iter.Label()
+ }
+}
+
+// Next returns true if there is another label available.
+func (m *MergeItererator) Next() bool {
+ if m.one.done && m.two.done {
+ return false
+ }
+ if m.one.done {
+ m.current = m.two.label
+ m.two.advance()
+ return true
+ }
+ if m.two.done {
+ m.current = m.one.label
+ m.one.advance()
+ return true
+ }
+ if m.one.label.Key == m.two.label.Key {
+ m.current = m.one.label // first iterator label value wins
+ m.one.advance()
+ m.two.advance()
+ return true
+ }
+ if m.one.label.Key < m.two.label.Key {
+ m.current = m.one.label
+ m.one.advance()
+ return true
+ }
+ m.current = m.two.label
+ m.two.advance()
+ return true
+}
+
+// Label returns the current value after Next() returns true.
+func (m *MergeItererator) Label() KeyValue {
+ return m.current
+}
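
A hypothetical usage sketch of the merge iterator, assuming the same go.opentelemetry.io/otel/label import path; on duplicate keys the value from the first set wins, as documented above.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/label"
)

func main() {
	s1 := label.NewSet(label.String("env", "prod"), label.Int("replicas", 3))
	s2 := label.NewSet(label.String("env", "staging"), label.String("zone", "us-east"))

	// "env" appears in both sets; the first iterator's value ("prod") wins.
	mi := label.NewMergeIterator(&s1, &s2)
	for mi.Next() {
		kv := mi.Label()
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
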
diff --git a/vendor/go.opentelemetry.io/otel/label/key.go b/vendor/go.opentelemetry.io/otel/label/key.go
new file mode 100644
index 0000000..7d72378
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/key.go
@@ -0,0 +1,169 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+// Key represents the key part in key-value pairs. It's a string. The
+// allowed character set in the key depends on the use of the key.
+type Key string
+
+// Bool creates a KeyValue instance with a BOOL Value.
+//
+// If creating both key and a bool value at the same time, then
+// instead of calling Key(name).Bool(value) consider using a
+// convenience function provided by the api/key package -
+// key.Bool(name, value).
+func (k Key) Bool(v bool) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: BoolValue(v),
+ }
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both key and an int64 value at the same time, then
+// instead of calling Key(name).Int64(value) consider using a
+// convenience function provided by the api/key package -
+// key.Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Int64Value(v),
+ }
+}
+
+// Uint64 creates a KeyValue instance with a UINT64 Value.
+//
+// If creating both key and a uint64 value at the same time, then
+// instead of calling Key(name).Uint64(value) consider using a
+// convenience function provided by the api/key package -
+// key.Uint64(name, value).
+func (k Key) Uint64(v uint64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Uint64Value(v),
+ }
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both key and a float64 value at the same time, then
+// instead of calling Key(name).Float64(value) consider using a
+// convenience function provided by the api/key package -
+// key.Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Float64Value(v),
+ }
+}
+
+// Int32 creates a KeyValue instance with an INT32 Value.
+//
+// If creating both key and an int32 value at the same time, then
+// instead of calling Key(name).Int32(value) consider using a
+// convenience function provided by the api/key package -
+// key.Int32(name, value).
+func (k Key) Int32(v int32) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Int32Value(v),
+ }
+}
+
+// Uint32 creates a KeyValue instance with a UINT32 Value.
+//
+// If creating both key and a uint32 value at the same time, then
+// instead of calling Key(name).Uint32(value) consider using a
+// convenience function provided by the api/key package -
+// key.Uint32(name, value).
+func (k Key) Uint32(v uint32) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Uint32Value(v),
+ }
+}
+
+// Float32 creates a KeyValue instance with a FLOAT32 Value.
+//
+// If creating both key and a float32 value at the same time, then
+// instead of calling Key(name).Float32(value) consider using a
+// convenience function provided by the api/key package -
+// key.Float32(name, value).
+func (k Key) Float32(v float32) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Float32Value(v),
+ }
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both key and a string value at the same time, then
+// instead of calling Key(name).String(value) consider using a
+// convenience function provided by the api/key package -
+// key.String(name, value).
+func (k Key) String(v string) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: StringValue(v),
+ }
+}
+
+// Int creates a KeyValue instance with either an INT32 or an INT64
+// Value, depending on whether the int type is 32 or 64 bits wide.
+//
+// If creating both key and an int value at the same time, then
+// instead of calling Key(name).Int(value) consider using a
+// convenience function provided by the api/key package -
+// key.Int(name, value).
+func (k Key) Int(v int) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: IntValue(v),
+ }
+}
+
+// Uint creates a KeyValue instance with either a UINT32 or a UINT64
+// Value, depending on whether the uint type is 32 or 64 bits wide.
+//
+// If creating both key and a uint value at the same time, then
+// instead of calling Key(name).Uint(value) consider using a
+// convenience function provided by the api/key package -
+// key.Uint(name, value).
+func (k Key) Uint(v uint) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: UintValue(v),
+ }
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+ return len(k) != 0
+}
+
+// Array creates a KeyValue instance with an ARRAY Value.
+//
+// If creating both key and an array value at the same time, then
+// instead of calling Key(name).Array(value) consider using a
+// convenience function provided by the api/key package -
+// key.Array(name, value).
+func (k Key) Array(v interface{}) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: ArrayValue(v),
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/label/kv.go b/vendor/go.opentelemetry.io/otel/label/kv.go
new file mode 100644
index 0000000..3e2764f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/kv.go
@@ -0,0 +1,144 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+ Key Key
+ Value Value
+}
+
+// Bool creates a new key-value pair with a passed name and a bool
+// value.
+func Bool(k string, v bool) KeyValue {
+ return Key(k).Bool(v)
+}
+
+// Int64 creates a new key-value pair with a passed name and an int64
+// value.
+func Int64(k string, v int64) KeyValue {
+ return Key(k).Int64(v)
+}
+
+// Uint64 creates a new key-value pair with a passed name and a uint64
+// value.
+func Uint64(k string, v uint64) KeyValue {
+ return Key(k).Uint64(v)
+}
+
+// Float64 creates a new key-value pair with a passed name and a float64
+// value.
+func Float64(k string, v float64) KeyValue {
+ return Key(k).Float64(v)
+}
+
+// Int32 creates a new key-value pair with a passed name and an int32
+// value.
+func Int32(k string, v int32) KeyValue {
+ return Key(k).Int32(v)
+}
+
+// Uint32 creates a new key-value pair with a passed name and a uint32
+// value.
+func Uint32(k string, v uint32) KeyValue {
+ return Key(k).Uint32(v)
+}
+
+// Float32 creates a new key-value pair with a passed name and a float32
+// value.
+func Float32(k string, v float32) KeyValue {
+ return Key(k).Float32(v)
+}
+
+// String creates a new key-value pair with a passed name and a string
+// value.
+func String(k, v string) KeyValue {
+ return Key(k).String(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
+func Stringer(k string, v fmt.Stringer) KeyValue {
+ return Key(k).String(v.String())
+}
+
+// Int creates a new key-value pair instance with a passed name and
+// either an int32 or an int64 value, depending on whether the int
+// type is 32 or 64 bits wide.
+func Int(k string, v int) KeyValue {
+ return Key(k).Int(v)
+}
+
+// Uint creates a new key-value pair instance with a passed name and
+// either a uint32 or a uint64 value, depending on whether the uint
+// type is 32 or 64 bits wide.
+func Uint(k string, v uint) KeyValue {
+ return Key(k).Uint(v)
+}
+
+// Array creates a new key-value pair with a passed name and an array.
+// Only arrays of primitive type are supported.
+func Array(k string, v interface{}) KeyValue {
+ return Key(k).Array(v)
+}
+
+// Any creates a new key-value pair instance with a passed name and
+// automatic type inference. This is slower, and not type-safe.
+func Any(k string, value interface{}) KeyValue {
+ if value == nil {
+ return String(k, "<nil>")
+ }
+
+ if stringer, ok := value.(fmt.Stringer); ok {
+ return String(k, stringer.String())
+ }
+
+ rv := reflect.ValueOf(value)
+
+ switch rv.Kind() {
+ case reflect.Array, reflect.Slice:
+ return Array(k, value)
+ case reflect.Bool:
+ return Bool(k, rv.Bool())
+ case reflect.Int, reflect.Int8, reflect.Int16:
+ return Int(k, int(rv.Int()))
+ case reflect.Int32:
+ return Int32(k, int32(rv.Int()))
+ case reflect.Int64:
+ return Int64(k, int64(rv.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16:
+ return Uint(k, uint(rv.Uint()))
+ case reflect.Uint32:
+ return Uint32(k, uint32(rv.Uint()))
+ case reflect.Uint64, reflect.Uintptr:
+ return Uint64(k, rv.Uint())
+ case reflect.Float32:
+ return Float32(k, float32(rv.Float()))
+ case reflect.Float64:
+ return Float64(k, rv.Float())
+ case reflect.String:
+ return String(k, rv.String())
+ }
+ if b, err := json.Marshal(value); value != nil && err == nil {
+ return String(k, string(b))
+ }
+ return String(k, fmt.Sprint(value))
+}
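
A small illustrative sketch contrasting the fluent Key builders with the package-level helpers, and showing the reflection-based type inference performed by Any (import path assumed as above):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/label"
)

func main() {
	// Two equivalent ways to build a pair.
	kv1 := label.Key("attempts").Int64(3)
	kv2 := label.Int64("attempts", 3)
	fmt.Println(kv1.Key == kv2.Key, kv1.Value.AsInt64() == kv2.Value.AsInt64())

	// Any infers the value type through reflection; slower, but handy
	// when the static type is not known.
	pairs := []label.KeyValue{
		label.Any("ratio", 0.75),               // FLOAT64
		label.Any("enabled", true),             // BOOL
		label.Any("hosts", []string{"a", "b"}), // ARRAY
	}
	for _, kv := range pairs {
		fmt.Printf("%s: %s = %s\n", kv.Key, kv.Value.Type(), kv.Value.Emit())
	}
}
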
diff --git a/vendor/go.opentelemetry.io/otel/label/set.go b/vendor/go.opentelemetry.io/otel/label/set.go
new file mode 100644
index 0000000..3bd5263
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/set.go
@@ -0,0 +1,468 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+import (
+ "encoding/json"
+ "reflect"
+ "sort"
+ "sync"
+)
+
+type (
+ // Set is the representation for a distinct label set. It
+ // manages an immutable set of labels, with an internal cache
+ // for storing label encodings.
+ //
+ // This type supports the `Equivalent` method of comparison
+ // using values of type `Distinct`.
+ //
+ // This type is used to implement:
+ // 1. Metric labels
+ // 2. Resource sets
+ // 3. Correlation map (TODO)
+ Set struct {
+ equivalent Distinct
+
+ lock sync.Mutex
+ encoders [maxConcurrentEncoders]EncoderID
+ encoded [maxConcurrentEncoders]string
+ }
+
+ // Distinct wraps a variable-size array of `KeyValue`,
+ // constructed with keys in sorted order. This can be used as
+ // a map key or for equality checking between Sets.
+ Distinct struct {
+ iface interface{}
+ }
+
+ // Filter supports removing certain labels from label sets.
+ // When the filter returns true, the label will be kept in
+ // the filtered label set. When the filter returns false, the
+ // label is excluded from the filtered label set, and the
+ // label instead appears in the `removed` list of excluded labels.
+ Filter func(KeyValue) bool
+
+ // Sortable implements `sort.Interface`, used for sorting
+ // `KeyValue`. This is an exported type to support a
+ // memory optimization. A pointer to one of these is needed
+ // for the call to `sort.Stable()`, which the caller may
+ // provide in order to avoid an allocation. See
+ // `NewSetWithSortable()`.
+ Sortable []KeyValue
+)
+
+var (
+ // keyValueType is used in `computeDistinctReflect`.
+ keyValueType = reflect.TypeOf(KeyValue{})
+
+ // emptySet is returned for empty label sets.
+ emptySet = &Set{
+ equivalent: Distinct{
+ iface: [0]KeyValue{},
+ },
+ }
+)
+
+const maxConcurrentEncoders = 3
+
+// EmptySet returns a reference to a Set with no elements.
+func EmptySet() *Set {
+ return emptySet
+}
+
+// reflect abbreviates `reflect.ValueOf`.
+func (d Distinct) reflect() reflect.Value {
+ return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid `*Set`.
+func (d Distinct) Valid() bool {
+ return d.iface != nil
+}
+
+// Len returns the number of labels in this set.
+func (l *Set) Len() int {
+ if l == nil || !l.equivalent.Valid() {
+ return 0
+ }
+ return l.equivalent.reflect().Len()
+}
+
+// Get returns the KeyValue at ordered position `idx` in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+ if l == nil {
+ return KeyValue{}, false
+ }
+ value := l.equivalent.reflect()
+
+ if idx >= 0 && idx < value.Len() {
+ // Note: The Go compiler successfully avoids an allocation for
+ // the interface{} conversion here:
+ return value.Index(idx).Interface().(KeyValue), true
+ }
+
+ return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+ if l == nil {
+ return Value{}, false
+ }
+ rValue := l.equivalent.reflect()
+ vlen := rValue.Len()
+
+ idx := sort.Search(vlen, func(idx int) bool {
+ return rValue.Index(idx).Interface().(KeyValue).Key >= k
+ })
+ if idx >= vlen {
+ return Value{}, false
+ }
+ keyValue := rValue.Index(idx).Interface().(KeyValue)
+ if k == keyValue.Key {
+ return keyValue.Value, true
+ }
+ return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+ if l == nil {
+ return false
+ }
+ _, ok := l.Value(k)
+ return ok
+}
+
+// Iter returns an iterator for visiting the labels in this set.
+func (l *Set) Iter() Iterator {
+ return Iterator{
+ storage: l,
+ idx: -1,
+ }
+}
+
+// ToSlice returns the set of labels belonging to this set, sorted,
+// where keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+ iter := l.Iter()
+ return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The
+// Distinct type guarantees that the result will equal the equivalent
+// Distinct value of any label set with the same elements as this,
+// where sets are made unique by choosing the last value in the input
+// for any given key.
+func (l *Set) Equivalent() Distinct {
+ if l == nil || !l.equivalent.Valid() {
+ return emptySet.equivalent
+ }
+ return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+ return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to
+// `encoder`. The result will be cached in this `*Set`.
+func (l *Set) Encoded(encoder Encoder) string {
+ if l == nil || encoder == nil {
+ return ""
+ }
+
+ id := encoder.ID()
+ if !id.Valid() {
+ // Invalid IDs are not cached.
+ return encoder.Encode(l.Iter())
+ }
+
+ var lookup *string
+ l.lock.Lock()
+ for idx := 0; idx < maxConcurrentEncoders; idx++ {
+ if l.encoders[idx] == id {
+ lookup = &l.encoded[idx]
+ break
+ }
+ }
+ l.lock.Unlock()
+
+ if lookup != nil {
+ return *lookup
+ }
+
+ r := encoder.Encode(l.Iter())
+
+ l.lock.Lock()
+ defer l.lock.Unlock()
+
+ for idx := 0; idx < maxConcurrentEncoders; idx++ {
+ if l.encoders[idx] == id {
+ return l.encoded[idx]
+ }
+ if !l.encoders[idx].Valid() {
+ l.encoders[idx] = id
+ l.encoded[idx] = r
+ return r
+ }
+ }
+
+ // TODO: This is a performance cliff. Find a way for this to
+ // generate a warning.
+ return r
+}
+
+func empty() Set {
+ return Set{
+ equivalent: emptySet.equivalent,
+ }
+}
+
+// NewSet returns a new `Set`. See the documentation for
+// `NewSetWithSortableFiltered` for more details.
+//
+// Except for empty sets, this method adds an additional allocation
+// compared with calls that include a `*Sortable`.
+func NewSet(kvs ...KeyValue) Set {
+ // Check for empty set.
+ if len(kvs) == 0 {
+ return empty()
+ }
+ s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
+ return s //nolint
+}
+
+// NewSetWithSortable returns a new `Set`. See the documentation for
+// `NewSetWithSortableFiltered` for more details.
+//
+// This call includes a `*Sortable` option as a memory optimization.
+func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
+ // Check for empty set.
+ if len(kvs) == 0 {
+ return empty()
+ }
+ s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
+ return s //nolint
+}
+
+// NewSetWithFiltered returns a new `Set`. See the documentation for
+// `NewSetWithSortableFiltered` for more details.
+//
+// This call includes a `Filter` to include/exclude label keys from
+// the return value. Excluded keys are returned as a slice of label
+// values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+ // Check for empty set.
+ if len(kvs) == 0 {
+ return empty(), nil
+ }
+ return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
+}
+
+// NewSetWithSortableFiltered returns a new `Set`.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on `*Set`, although this returns `Set`.
+// Callers can avoid memory allocations by:
+//
+// - allocating a `Sortable` for use as a temporary in this method
+// - allocating a `Set` for storing the return value of this
+// constructor.
+//
+// The result maintains a cache of encoded labels, by label.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second `[]KeyValue` return value is a list of labels that were
+// excluded by the Filter (if non-nil).
+func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
+ // Check for empty set.
+ if len(kvs) == 0 {
+ return empty(), nil
+ }
+
+ *tmp = kvs
+
+ // Stable sort so the following de-duplication can implement
+ // last-value-wins semantics.
+ sort.Stable(tmp)
+
+ *tmp = nil
+
+ position := len(kvs) - 1
+ offset := position - 1
+
+ // The requirements stated above require that the stable
+ // result be placed in the end of the input slice, while
+ // overwritten values are swapped to the beginning.
+ //
+ // De-duplicate with last-value-wins semantics. Preserve
+ // duplicate values at the beginning of the input slice.
+ for ; offset >= 0; offset-- {
+ if kvs[offset].Key == kvs[position].Key {
+ continue
+ }
+ position--
+ kvs[offset], kvs[position] = kvs[position], kvs[offset]
+ }
+ if filter != nil {
+ return filterSet(kvs[position:], filter)
+ }
+ return Set{
+ equivalent: computeDistinct(kvs[position:]),
+ }, nil
+}
+
+// filterSet reorders `kvs` so that included keys are contiguous at
+// the end of the slice, while excluded keys precede the included keys.
+func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+ var excluded []KeyValue
+
+ // Move labels that do not match the filter so
+ // they're adjacent before calling computeDistinct().
+ distinctPosition := len(kvs)
+
+ // Swap indistinct keys forward and distinct keys toward the
+ // end of the slice.
+ offset := len(kvs) - 1
+ for ; offset >= 0; offset-- {
+ if filter(kvs[offset]) {
+ distinctPosition--
+ kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
+ continue
+ }
+ }
+ excluded = kvs[:distinctPosition]
+
+ return Set{
+ equivalent: computeDistinct(kvs[distinctPosition:]),
+ }, excluded
+}
+
+// Filter returns a filtered copy of this `Set`. See the
+// documentation for `NewSetWithSortableFiltered` for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+ if re == nil {
+ return Set{
+ equivalent: l.equivalent,
+ }, nil
+ }
+
+ // Note: This could be refactored to avoid the temporary slice
+ // allocation, if it proves to be expensive.
+ return filterSet(l.ToSlice(), re)
+}
+
+// computeDistinct returns a `Distinct` using either the fixed- or
+// reflect-oriented code path, depending on the size of the input.
+// The input slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+ iface := computeDistinctFixed(kvs)
+ if iface == nil {
+ iface = computeDistinctReflect(kvs)
+ }
+ return Distinct{
+ iface: iface,
+ }
+}
+
+// computeDistinctFixed computes a `Distinct` for small slices. It
+// returns nil if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+ switch len(kvs) {
+ case 1:
+ ptr := new([1]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 2:
+ ptr := new([2]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 3:
+ ptr := new([3]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 4:
+ ptr := new([4]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 5:
+ ptr := new([5]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 6:
+ ptr := new([6]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 7:
+ ptr := new([7]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 8:
+ ptr := new([8]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 9:
+ ptr := new([9]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ case 10:
+ ptr := new([10]KeyValue)
+ copy((*ptr)[:], kvs)
+ return *ptr
+ default:
+ return nil
+ }
+}
+
+// computeDistinctReflect computes a `Distinct` using reflection. It
+// works for input of any size.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+ at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+ for i, keyValue := range kvs {
+ *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+ }
+ return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the `*Set`.
+func (l *Set) MarshalJSON() ([]byte, error) {
+ return json.Marshal(l.equivalent.iface)
+}
+
+// Len implements `sort.Interface`.
+func (l *Sortable) Len() int {
+ return len(*l)
+}
+
+// Swap implements `sort.Interface`.
+func (l *Sortable) Swap(i, j int) {
+ (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements `sort.Interface`.
+func (l *Sortable) Less(i, j int) bool {
+ return (*l)[i].Key < (*l)[j].Key
+}
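
A hedged sketch of the last-value-wins de-duplication performed by NewSet and of using Distinct as a map key, which is the pattern the Equivalent documentation above describes (illustrative values only):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/label"
)

func main() {
	// Duplicate keys are resolved with last-value-wins semantics.
	set := label.NewSet(
		label.String("device", "olt-1"),
		label.String("device", "olt-2"),
	)
	fmt.Println(set.Len()) // 1
	if v, ok := set.Value("device"); ok {
		fmt.Println(v.AsString()) // olt-2
	}

	// Distinct values are comparable, so they can index per-label-set
	// state such as metric aggregators.
	counts := map[label.Distinct]int{}
	counts[set.Equivalent()]++
	other := label.NewSet(label.String("device", "olt-2"))
	counts[other.Equivalent()]++
	fmt.Println(len(counts)) // 1: the two sets are equivalent
}
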
diff --git a/vendor/go.opentelemetry.io/otel/label/type_string.go b/vendor/go.opentelemetry.io/otel/label/type_string.go
new file mode 100644
index 0000000..62afeb6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/type_string.go
@@ -0,0 +1,32 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package label
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[INVALID-0]
+ _ = x[BOOL-1]
+ _ = x[INT32-2]
+ _ = x[INT64-3]
+ _ = x[UINT32-4]
+ _ = x[UINT64-5]
+ _ = x[FLOAT32-6]
+ _ = x[FLOAT64-7]
+ _ = x[STRING-8]
+ _ = x[ARRAY-9]
+}
+
+const _Type_name = "INVALIDBOOLINT32INT64UINT32UINT64FLOAT32FLOAT64STRINGARRAY"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 21, 27, 33, 40, 47, 53, 58}
+
+func (i Type) String() string {
+ if i < 0 || i >= Type(len(_Type_index)-1) {
+ return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/label/value.go b/vendor/go.opentelemetry.io/otel/label/value.go
new file mode 100644
index 0000000..679009b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/label/value.go
@@ -0,0 +1,288 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unsafe"
+
+ "go.opentelemetry.io/otel/internal"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+ vtype Type
+ numeric uint64
+ stringly string
+ // TODO Lazy value type?
+
+ array interface{}
+}
+
+const (
+ INVALID Type = iota // No value.
+ BOOL // Boolean value, use AsBool() to get it.
+ INT32 // 32 bit signed integral value, use AsInt32() to get it.
+ INT64 // 64 bit signed integral value, use AsInt64() to get it.
+ UINT32 // 32 bit unsigned integral value, use AsUint32() to get it.
+ UINT64 // 64 bit unsigned integral value, use AsUint64() to get it.
+ FLOAT32 // 32 bit floating point value, use AsFloat32() to get it.
+ FLOAT64 // 64 bit floating point value, use AsFloat64() to get it.
+ STRING // String value, use AsString() to get it.
+ ARRAY // Array value of arbitrary type, use AsArray() to get it.
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+ return Value{
+ vtype: BOOL,
+ numeric: internal.BoolToRaw(v),
+ }
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+ return Value{
+ vtype: INT64,
+ numeric: internal.Int64ToRaw(v),
+ }
+}
+
+// Uint64Value creates a UINT64 Value.
+func Uint64Value(v uint64) Value {
+ return Value{
+ vtype: UINT64,
+ numeric: internal.Uint64ToRaw(v),
+ }
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+ return Value{
+ vtype: FLOAT64,
+ numeric: internal.Float64ToRaw(v),
+ }
+}
+
+// Int32Value creates an INT32 Value.
+func Int32Value(v int32) Value {
+ return Value{
+ vtype: INT32,
+ numeric: internal.Int32ToRaw(v),
+ }
+}
+
+// Uint32Value creates a UINT32 Value.
+func Uint32Value(v uint32) Value {
+ return Value{
+ vtype: UINT32,
+ numeric: internal.Uint32ToRaw(v),
+ }
+}
+
+// Float32Value creates a FLOAT32 Value.
+func Float32Value(v float32) Value {
+ return Value{
+ vtype: FLOAT32,
+ numeric: internal.Float32ToRaw(v),
+ }
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+ return Value{
+ vtype: STRING,
+ stringly: v,
+ }
+}
+
+// IntValue creates either an INT32 or an INT64 Value, depending on whether
+// the int type is 32 or 64 bits wide.
+func IntValue(v int) Value {
+ if unsafe.Sizeof(v) == 4 {
+ return Int32Value(int32(v))
+ }
+ return Int64Value(int64(v))
+}
+
+// UintValue creates either a UINT32 or a UINT64 Value, depending on whether
+// the uint type is 32 or 64 bits wide.
+func UintValue(v uint) Value {
+ if unsafe.Sizeof(v) == 4 {
+ return Uint32Value(uint32(v))
+ }
+ return Uint64Value(uint64(v))
+}
+
+// ArrayValue creates an ARRAY value from an array or slice.
+// Only arrays or slices of bool, int, int32, int64, uint, uint32, uint64,
+// float, float32, float64, or string types are allowed. Specifically, arrays
+// and slices can not contain other arrays, slices, structs, or non-standard
+// types. If the passed value is not an array or slice of these types an
+// INVALID value is returned.
+func ArrayValue(v interface{}) Value {
+ switch reflect.TypeOf(v).Kind() {
+ case reflect.Array, reflect.Slice:
+ // get array type regardless of dimensions
+ typ := reflect.TypeOf(v).Elem()
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Bool, reflect.Int, reflect.Int32, reflect.Int64,
+ reflect.Float32, reflect.Float64, reflect.String,
+ reflect.Uint, reflect.Uint32, reflect.Uint64:
+ val := reflect.ValueOf(v)
+ length := val.Len()
+ frozen := reflect.Indirect(reflect.New(reflect.ArrayOf(length, typ)))
+ reflect.Copy(frozen, val)
+ return Value{
+ vtype: ARRAY,
+ array: frozen.Interface(),
+ }
+ default:
+ return Value{vtype: INVALID}
+ }
+ }
+ return Value{vtype: INVALID}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+ return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+ return internal.RawToBool(v.numeric)
+}
+
+// AsInt32 returns the int32 value. Make sure that the Value's type is
+// INT32.
+func (v Value) AsInt32() int32 {
+ return internal.RawToInt32(v.numeric)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+ return internal.RawToInt64(v.numeric)
+}
+
+// AsUint32 returns the uint32 value. Make sure that the Value's type
+// is UINT32.
+func (v Value) AsUint32() uint32 {
+ return internal.RawToUint32(v.numeric)
+}
+
+// AsUint64 returns the uint64 value. Make sure that the Value's type is
+// UINT64.
+func (v Value) AsUint64() uint64 {
+ return internal.RawToUint64(v.numeric)
+}
+
+// AsFloat32 returns the float32 value. Make sure that the Value's
+// type is FLOAT32.
+func (v Value) AsFloat32() float32 {
+ return internal.RawToFloat32(v.numeric)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+ return internal.RawToFloat64(v.numeric)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+ return v.stringly
+}
+
+// AsArray returns the array Value as an interface{}.
+func (v Value) AsArray() interface{} {
+ return v.array
+}
+
+type unknownValueType struct{}
+
+// AsInterface returns Value's data as interface{}.
+func (v Value) AsInterface() interface{} {
+ switch v.Type() {
+ case ARRAY:
+ return v.AsArray()
+ case BOOL:
+ return v.AsBool()
+ case INT32:
+ return v.AsInt32()
+ case INT64:
+ return v.AsInt64()
+ case UINT32:
+ return v.AsUint32()
+ case UINT64:
+ return v.AsUint64()
+ case FLOAT32:
+ return v.AsFloat32()
+ case FLOAT64:
+ return v.AsFloat64()
+ case STRING:
+ return v.stringly
+ }
+ return unknownValueType{}
+}
+
+// Emit returns a string representation of Value's data.
+func (v Value) Emit() string {
+ switch v.Type() {
+ case ARRAY:
+ return fmt.Sprint(v.array)
+ case BOOL:
+ return strconv.FormatBool(v.AsBool())
+ case INT32:
+ return strconv.FormatInt(int64(v.AsInt32()), 10)
+ case INT64:
+ return strconv.FormatInt(v.AsInt64(), 10)
+ case UINT32:
+ return strconv.FormatUint(uint64(v.AsUint32()), 10)
+ case UINT64:
+ return strconv.FormatUint(v.AsUint64(), 10)
+ case FLOAT32:
+ return fmt.Sprint(v.AsFloat32())
+ case FLOAT64:
+ return fmt.Sprint(v.AsFloat64())
+ case STRING:
+ return v.stringly
+ default:
+ return "unknown"
+ }
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+ var jsonVal struct {
+ Type string
+ Value interface{}
+ }
+ jsonVal.Type = v.Type().String()
+ jsonVal.Value = v.AsInterface()
+ return json.Marshal(jsonVal)
+}
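
A brief sketch of ArrayValue's element-type restriction and of Emit, under the same import-path assumption:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/label"
)

func main() {
	ok := label.ArrayValue([]int64{1, 2, 3})
	fmt.Println(ok.Type(), ok.Emit()) // ARRAY [1 2 3]

	// Element types outside the allowed primitives yield an INVALID value.
	bad := label.ArrayValue([]struct{ N int }{{N: 1}})
	fmt.Println(bad.Type()) // INVALID
}
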
diff --git a/vendor/go.opentelemetry.io/otel/otel.go b/vendor/go.opentelemetry.io/otel/otel.go
new file mode 100644
index 0000000..aaabac2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/otel.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel
+
+import (
+ "go.opentelemetry.io/otel/api/metric"
+ "go.opentelemetry.io/otel/api/trace"
+)
+
+type Tracer = trace.Tracer
+
+type Meter = metric.Meter
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+ // Handle handles any error deemed irremediable by an OpenTelemetry
+ // component.
+ Handle(error)
+}
diff --git a/vendor/go.opentelemetry.io/otel/pre_release.sh b/vendor/go.opentelemetry.io/otel/pre_release.sh
new file mode 100644
index 0000000..e09924b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/pre_release.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+help()
+{
+ printf "\n"
+ printf "Usage: $0 -t tag\n"
+ printf "\t-t Unreleased tag. Update all go.mod with this tag.\n"
+ exit 1 # Exit script after printing help
+}
+
+while getopts "t:" opt
+do
+ case "$opt" in
+ t ) TAG="$OPTARG" ;;
+ ? ) help ;; # Print help
+ esac
+done
+
+# Print help in case parameters are empty
+if [ -z "$TAG" ]
+then
+ printf "Tag is missing\n";
+ help
+fi
+
+# Validate semver
+SEMVER_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
+if [[ "${TAG}" =~ ${SEMVER_REGEX} ]]; then
+ printf "${TAG} is valid semver tag.\n"
+else
+ printf "${TAG} is not a valid semver tag.\n"
+ exit -1
+fi
+
+TAG_FOUND=`git tag --list ${TAG}`
+if [[ ${TAG_FOUND} = ${TAG} ]] ; then
+ printf "Tag ${TAG} already exists\n"
+ exit -1
+fi
+
+# Get version for sdk/opentelemetry.go
+OTEL_VERSION=$(echo "${TAG}" | grep -o '^v[0-9]\+\.[0-9]\+\.[0-9]\+')
+# Strip leading v
+OTEL_VERSION="${OTEL_VERSION#v}"
+
+cd $(dirname $0)
+
+if ! git diff --quiet; then \
+ printf "Working tree is not clean, can't proceed with the release process\n"
+ git status
+ git diff
+ exit 1
+fi
+
+# Update sdk/opentelemetry.go
+cp ./sdk/opentelemetry.go ./sdk/opentelemetry.go.bak
+sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_VERSION}\"/" ./sdk/opentelemetry.go.bak >./sdk/opentelemetry.go
+rm -f ./sdk/opentelemetry.go.bak
+
+# Update go.mod
+git checkout -b pre_release_${TAG} master
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's/^\.\///' | sort)
+
+for dir in $PACKAGE_DIRS; do
+ cp "${dir}/go.mod" "${dir}/go.mod.bak"
+ sed "s/opentelemetry.io\/otel\([^ ]*\) v[0-9]*\.[0-9]*\.[0-9]/opentelemetry.io\/otel\1 ${TAG}/" "${dir}/go.mod.bak" >"${dir}/go.mod"
+ rm -f "${dir}/go.mod.bak"
+done
+
+# Run lint to update go.sum
+make lint
+
+# Add changes and commit.
+git add .
+make ci
+git commit -m "Prepare for releasing $TAG"
+
+printf "Now run following to verify the changes.\ngit diff master\n"
+printf "\nThen push the changes to upstream\n"
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 0000000..8e3bd8d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,78 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel
+
+import "context"
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+ // Get returns the value associated with the passed key.
+ Get(key string) string
+ // Set stores the key-value pair.
+ Set(key string, value string)
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+ // Inject sets cross-cutting concerns from the Context into the carrier.
+ Inject(ctx context.Context, carrier TextMapCarrier)
+ // Extract reads cross-cutting concerns from the carrier into a Context.
+ Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+ // Fields returns the keys whose values are set with Inject.
+ Fields() []string
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+ for _, i := range p {
+ i.Inject(ctx, carrier)
+ }
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ for _, i := range p {
+ ctx = i.Extract(ctx, carrier)
+ }
+ return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+ unique := make(map[string]struct{})
+ for _, i := range p {
+ for _, k := range i.Fields() {
+ unique[k] = struct{}{}
+ }
+ }
+
+ fields := make([]string, 0, len(unique))
+ for k := range unique {
+ fields = append(fields, k)
+ }
+ return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagator. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+ return compositeTextMapPropagator(p)
+}
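
A sketch of composing two toy propagators with NewCompositeTextMapPropagator. The mapCarrier and headerPropagator types below are invented for illustration and are not part of the OpenTelemetry API; only the interfaces they satisfy come from the file above.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

type ctxKey string

// mapCarrier is a throwaway TextMapCarrier backed by a plain map,
// standing in for HTTP headers.
type mapCarrier map[string]string

func (c mapCarrier) Get(key string) string { return c[key] }
func (c mapCarrier) Set(key, value string) { c[key] = value }

// headerPropagator copies one context value to and from one carrier field.
type headerPropagator struct {
	key   ctxKey
	field string
}

func (p headerPropagator) Inject(ctx context.Context, carrier otel.TextMapCarrier) {
	if v, ok := ctx.Value(p.key).(string); ok {
		carrier.Set(p.field, v)
	}
}

func (p headerPropagator) Extract(ctx context.Context, carrier otel.TextMapCarrier) context.Context {
	if v := carrier.Get(p.field); v != "" {
		return context.WithValue(ctx, p.key, v)
	}
	return ctx
}

func (p headerPropagator) Fields() []string { return []string{p.field} }

func main() {
	prop := otel.NewCompositeTextMapPropagator(
		headerPropagator{key: "tenant", field: "x-tenant"},
		headerPropagator{key: "txn", field: "x-txn-id"},
	)

	ctx := context.WithValue(context.Background(), ctxKey("tenant"), "voltha")
	carrier := mapCarrier{}
	prop.Inject(ctx, carrier)           // only x-tenant is present in ctx
	fmt.Println(carrier, prop.Fields()) // injected headers and field list
}
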
diff --git a/vendor/go.opentelemetry.io/otel/tag.sh b/vendor/go.opentelemetry.io/otel/tag.sh
new file mode 100644
index 0000000..70767c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/tag.sh
@@ -0,0 +1,178 @@
+#!/usr/bin/env bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+readonly PROGNAME=$(basename "$0")
+readonly PROGDIR=$(readlink -m "$(dirname "$0")")
+
+readonly EXCLUDE_PACKAGES="internal/tools"
+readonly SEMVER_REGEX="v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?"
+
+usage() {
+ cat <<- EOF
+Usage: $PROGNAME [OPTIONS] SEMVER_TAG COMMIT_HASH
+
+Creates git tag for all Go packages in project.
+
+OPTIONS:
+ -h --help Show this help.
+
+ARGUMENTS:
+ SEMVER_TAG Semantic version to tag with.
+ COMMIT_HASH Git commit hash to tag.
+EOF
+}
+
+cmdline() {
+ local arg commit
+
+ for arg
+ do
+ local delim=""
+ case "$arg" in
+ # Translate long form options to short form.
+ --help) args="${args}-h ";;
+ # Pass through for everything else.
+ *) [[ "${arg:0:1}" == "-" ]] || delim="\""
+ args="${args}${delim}${arg}${delim} ";;
+ esac
+ done
+
+ # Reset and process short form options.
+ eval set -- "$args"
+
+ while getopts "h" OPTION
+ do
+ case $OPTION in
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "unknown option: $OPTION"
+ usage
+ exit 1
+ ;;
+ esac
+ done
+
+ # Positional arguments.
+ shift $((OPTIND-1))
+ readonly TAG="$1"
+ if [ -z "$TAG" ]
+ then
+ echo "missing SEMVER_TAG"
+ usage
+ exit 1
+ fi
+ if [[ ! "$TAG" =~ $SEMVER_REGEX ]]
+ then
+ printf "invalid semantic version: %s\n" "$TAG"
+ exit 2
+ fi
+ if [[ "$( git tag --list "$TAG" )" ]]
+ then
+ printf "tag already exists: %s\n" "$TAG"
+ exit 2
+ fi
+
+ shift
+ commit="$1"
+ if [ -z "$commit" ]
+ then
+ echo "missing COMMIT_HASH"
+ usage
+ exit 1
+ fi
+ # Verify rev is for a commit and unify hashes into a complete SHA1.
+ readonly SHA="$( git rev-parse --quiet --verify "${commit}^{commit}" )"
+ if [ -z "$SHA" ]
+ then
+ printf "invalid commit hash: %s\n" "$commit"
+ exit 2
+ fi
+ if [ "$( git merge-base "$SHA" HEAD )" != "$SHA" ]
+ then
+ printf "commit '%s' not found on this branch\n" "$commit"
+ exit 2
+ fi
+}
+
+package_dirs() {
+ # Return a list of package directories in the form:
+ #
+ # package/directory/a
+ # package/directory/b
+ # deeper/package/directory/a
+ # ...
+ #
+ # Making sure to exclude any packages in the EXCLUDE_PACKAGES regexp.
+ find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
+ | grep -E -v "$EXCLUDE_PACKAGES" \
+ | sed 's/^\.\///' \
+ | sort
+}
+
+git_tag() {
+ local tag="$1"
+ local commit="$2"
+
+ git tag -a "$tag" -s -m "Version $tag" "$commit"
+}
+
+previous_version() {
+ local current="$1"
+
+ # Requires git > 2.0
+ git tag -l --sort=v:refname \
+ | grep -E "^${SEMVER_REGEX}$" \
+ | grep -v "$current" \
+ | tail -1
+}
+
+print_changes() {
+ local tag="$1"
+ local previous
+
+ previous="$( previous_version "$tag" )"
+ if [ -n "$previous" ]
+ then
+ printf "\nRaw changes made between %s and %s\n" "$previous" "$tag"
+ printf "======================================\n"
+ git --no-pager log --pretty=oneline "${previous}..$tag"
+ fi
+}
+
+main() {
+ local dir
+
+ cmdline "$@"
+
+ cd "$PROGDIR" || exit 3
+
+ # Create tag for root package.
+ git_tag "$TAG" "$SHA"
+ printf "created tag: %s\n" "$TAG"
+
+ # Create tag for all sub-packages.
+ for dir in $( package_dirs )
+ do
+ git_tag "${dir}/$TAG" "$SHA"
+ printf "created tag: %s\n" "${dir}/$TAG"
+ done
+
+ print_changes "$TAG"
+}
+main "$@"
diff --git a/vendor/go.opentelemetry.io/otel/unit/doc.go b/vendor/go.opentelemetry.io/otel/unit/doc.go
new file mode 100644
index 0000000..310a7b2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/unit/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package unit provides units.
+package unit // import "go.opentelemetry.io/otel/unit"
diff --git a/vendor/go.opentelemetry.io/otel/unit/unit.go b/vendor/go.opentelemetry.io/otel/unit/unit.go
new file mode 100644
index 0000000..dcd39af
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/unit/unit.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package unit
+
+type Unit string
+
+const (
+ Dimensionless Unit = "1"
+ Bytes Unit = "By"
+ Milliseconds Unit = "ms"
+)
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
new file mode 100644
index 0000000..dbb61a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+cd $(dirname $0)
+TOOLS_DIR=$(pwd)/.tools
+
+if [ -z "${GOPATH}" ] ; then
+ printf "GOPATH is not defined.\n"
+ exit -1
+fi
+
+if [ ! -d "${GOPATH}" ] ; then
+ printf "GOPATH ${GOPATH} is invalid \n"
+ exit -1
+fi
+
+# Pre-requisites
+if ! git diff --quiet; then \
+ git status
+ printf "\n\nError: working tree is not clean\n"
+ exit -1
+fi
+
+if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
+ printf "$(git log -1)"
+ printf "\n\nError: HEAD is not pointing to a tagged version"
+fi
+
+make ${TOOLS_DIR}/gojq
+
+DIR_TMP="${GOPATH}/src/oteltmp/"
+rm -rf $DIR_TMP
+mkdir -p $DIR_TMP
+
+printf "Copy examples to ${DIR_TMP}\n"
+cp -a ./example ${DIR_TMP}
+
+# Update go.mod files
+printf "Update go.mod: rename module and remove replace\n"
+
+PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
+
+for dir in $PACKAGE_DIRS; do
+ printf " Update go.mod for $dir\n"
+ (cd "${DIR_TMP}/${dir}" && \
+ # replaces is ("mod1" "mod2" …)
+ replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
+ # strip double quotes
+ replaces=("${replaces[@]%\"}") && \
+ replaces=("${replaces[@]#\"}") && \
+ # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
+ dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
+ go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
+ go mod tidy)
+done
+printf "Update done:\n\n"
+
+# Build directories that contain main package. These directories are different than
+# directories that contain go.mod files.
+printf "Build examples:\n"
+EXAMPLES=$(./get_main_pkgs.sh ./example)
+for ex in $EXAMPLES; do
+ printf " Build $ex in ${DIR_TMP}/${ex}\n"
+ (cd "${DIR_TMP}/${ex}" && \
+ go build .)
+done
+
+# Cleanup
+printf "Remove copied files.\n"
+rm -rf $DIR_TMP
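
[Editor's note] verify_examples.sh copies the example modules into a scratch GOPATH, strips their replace directives (go mod edit driven by gojq), and checks that each example still builds against published module versions. If the replace-extraction step were done in Go rather than via gojq, a rough sketch using golang.org/x/mod/modfile (an assumption for illustration only; that helper is not used anywhere in this patch) could look like:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/mod/modfile"
)

// listReplaces returns the "Old" path of every replace directive in a
// go.mod file, i.e. the same values the script pulls out with
// `go mod edit -json | gojq '.Replace[].Old.Path'`.
func listReplaces(gomod string) ([]string, error) {
	data, err := os.ReadFile(gomod)
	if err != nil {
		return nil, err
	}
	f, err := modfile.Parse(gomod, data, nil)
	if err != nil {
		return nil, err
	}
	var paths []string
	for _, r := range f.Replace {
		paths = append(paths, r.Old.Path)
	}
	return paths, nil
}

func main() {
	paths, err := listReplaces("go.mod")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range paths {
		fmt.Println(p)
	}
}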
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1661bec..ad6ab6c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -103,6 +103,8 @@
github.com/davecgh/go-spew/spew
# github.com/dgrijalva/jwt-go v3.2.0+incompatible
github.com/dgrijalva/jwt-go
+# github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+github.com/dgryski/go-rendezvous
# github.com/dustin/go-humanize v1.0.0
github.com/dustin/go-humanize
# github.com/eapache/go-resiliency v1.2.0
@@ -111,6 +113,14 @@
github.com/eapache/go-xerial-snappy
# github.com/eapache/queue v1.1.0
github.com/eapache/queue
+# github.com/go-redis/redis/v8 v8.3.4
+github.com/go-redis/redis/v8
+github.com/go-redis/redis/v8/internal
+github.com/go-redis/redis/v8/internal/hashtag
+github.com/go-redis/redis/v8/internal/pool
+github.com/go-redis/redis/v8/internal/proto
+github.com/go-redis/redis/v8/internal/rand
+github.com/go-redis/redis/v8/internal/util
# github.com/gogo/protobuf v1.3.2
## explicit
github.com/gogo/protobuf/gogoproto
@@ -221,7 +231,7 @@
github.com/modern-go/concurrent
# github.com/modern-go/reflect2 v1.0.1
github.com/modern-go/reflect2
-# github.com/opencord/voltha-lib-go/v7 v7.1.8
+# github.com/opencord/voltha-lib-go/v7 v7.3.2
## explicit
github.com/opencord/voltha-lib-go/v7/pkg/adapters/common
github.com/opencord/voltha-lib-go/v7/pkg/config
@@ -237,7 +247,7 @@
github.com/opencord/voltha-lib-go/v7/pkg/mocks/kafka
github.com/opencord/voltha-lib-go/v7/pkg/probe
github.com/opencord/voltha-lib-go/v7/pkg/version
-# github.com/opencord/voltha-protos/v5 v5.2.5
+# github.com/opencord/voltha-protos/v5 v5.3.8
## explicit
github.com/opencord/voltha-protos/v5/go/adapter_service
github.com/opencord/voltha-protos/v5/go/common
@@ -323,6 +333,19 @@
go.etcd.io/etcd/clientv3
go.etcd.io/etcd/embed
go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
+# go.opentelemetry.io/otel v0.13.0
+go.opentelemetry.io/otel
+go.opentelemetry.io/otel/api/global
+go.opentelemetry.io/otel/api/global/internal
+go.opentelemetry.io/otel/api/metric
+go.opentelemetry.io/otel/api/metric/registry
+go.opentelemetry.io/otel/api/trace
+go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/baggage
+go.opentelemetry.io/otel/internal/trace/noop
+go.opentelemetry.io/otel/label
+go.opentelemetry.io/otel/unit
# go.uber.org/atomic v1.7.0
go.uber.org/atomic
# go.uber.org/multierr v1.6.0
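
[Editor's note] The modules.txt hunks above record the knock-on effects of the dependency bumps earlier in this patch, most likely via voltha-lib-go v7.3.2: go-redis/redis/v8 v8.3.4 (together with its go-rendezvous hashing dependency) and the go.opentelemetry.io/otel v0.13.0 packages it instruments against are now carried in the vendor tree. A throwaway compile-level sanity check of the newly vendored client; the address and behaviour below are placeholders, not taken from this patch:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// go-redis v8 requires a context on every call; the address is only
	// a placeholder for illustration.
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	if err := rdb.Ping(ctx).Err(); err != nil {
		fmt.Println("redis not reachable:", err)
		return
	}
	fmt.Println("pong")
}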