This update cleans up the vendor directory and keeps only what is
required in the Dockerfiles. While these changes have been tested
locally and work as expected, the real test will come after the merge:
pulling down a fresh clone and running the build again.
Change-Id: I589b5eddc6815108e6dfd40f773e6c3db8bf7bb9
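As a rough sketch of that post-merge check, a fresh clone can be re-resolved and rebuilt with the commands below. The repository URL is a placeholder and the `dep ensure` / `make rw_core` steps are the ones already described in BUILD.md; adjust as needed for the actual project location.

```sh
# Hypothetical verification of the cleanup on a fresh checkout;
# replace <repo-url> with the project's actual repository location.
git clone <repo-url> voltha-verify
cd voltha-verify

# Re-resolve dependencies into the vendor directory, per BUILD.md.
dep ensure

# Build the voltha core (target taken from BUILD.md).
make rw_core
```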
diff --git a/BUILD.md b/BUILD.md
index 9d6afa1..2607e4f 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -80,11 +80,6 @@
necessary dependencies are located under the vendor directory. Whenever, a new package is added to the
project, please run "dep ensure" to update the appropriate deb files as well as the vendor library.
-*Note*: For some reasons (to be investigated) deb does not detect the ```github.com/cores/etcd``` dependency
-correctly. It had to be added manually. This means everytime a ```dep ensure``` is executed the etcd dependency will
-be removed along as with the directory under the vendor directory. Until this issue is resolved, please
-run ```dep ensure -add github.com/coreos/etcd``` everytime a "dep ensure" is executed.
-
To build the voltha core:
```
make rw_core
diff --git a/Gopkg.lock b/Gopkg.lock
index 0cd7e97..9b1ce63 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -40,14 +40,6 @@
revision = "3df31a1ada83e310c2e24b267c8e8b68836547b4"
[[projects]]
- branch = "master"
- digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- pruneopts = "UT"
- revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
digest = "1:526d64d0a3ac6c24875724a9355895be56a21f89a5d3ab5ba88d91244269a7d8"
name = "github.com/bsm/sarama-cluster"
packages = ["."]
@@ -64,133 +56,20 @@
revision = "0efaee1733e3399a3cb88fc7d2ce340bf2e863d7"
[[projects]]
- digest = "1:c28625428387b63dd7154eb857f51e700465cfbf7c06f619e71f2da33cefe47e"
- name = "github.com/coreos/bbolt"
- packages = ["."]
- pruneopts = "UT"
- revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:f289be21debc99fde35e61caade2d61ec7803ba613c94b98f09b2d3e0990cb62"
+ digest = "1:b4ba0dcf39b5ba06f69d9cd4eaacaba334801073099eb8be273d595094c99db3"
name = "github.com/coreos/etcd"
packages = [
- ".",
- "alarm",
- "auth",
"auth/authpb",
- "client",
- "clientv3",
- "clientv3/concurrency",
- "clientv3/leasing",
- "clientv3/namespace",
- "clientv3/naming",
- "clientv3/ordering",
- "compactor",
- "discovery",
- "embed",
- "error",
- "etcdmain",
- "etcdserver",
- "etcdserver/api",
- "etcdserver/api/etcdhttp",
- "etcdserver/api/v2http",
- "etcdserver/api/v2http/httptypes",
- "etcdserver/api/v2v3",
- "etcdserver/api/v3client",
- "etcdserver/api/v3election",
- "etcdserver/api/v3election/v3electionpb",
- "etcdserver/api/v3election/v3electionpb/gw",
- "etcdserver/api/v3lock",
- "etcdserver/api/v3lock/v3lockpb",
- "etcdserver/api/v3lock/v3lockpb/gw",
- "etcdserver/api/v3rpc",
"etcdserver/api/v3rpc/rpctypes",
- "etcdserver/auth",
"etcdserver/etcdserverpb",
- "etcdserver/etcdserverpb/gw",
- "etcdserver/membership",
- "etcdserver/stats",
- "lease",
- "lease/leasehttp",
- "lease/leasepb",
- "mvcc",
- "mvcc/backend",
"mvcc/mvccpb",
- "pkg/adt",
- "pkg/contention",
- "pkg/cors",
- "pkg/cpuutil",
- "pkg/crc",
- "pkg/debugutil",
- "pkg/fileutil",
- "pkg/flags",
- "pkg/httputil",
- "pkg/idutil",
- "pkg/ioutil",
- "pkg/logutil",
- "pkg/netutil",
- "pkg/osutil",
- "pkg/pathutil",
- "pkg/pbutil",
- "pkg/runtime",
- "pkg/schedule",
- "pkg/srv",
- "pkg/tlsutil",
- "pkg/transport",
"pkg/types",
- "pkg/wait",
- "proxy/grpcproxy",
- "proxy/grpcproxy/adapter",
- "proxy/grpcproxy/cache",
- "proxy/httpproxy",
- "proxy/tcpproxy",
- "raft",
- "raft/raftpb",
- "rafthttp",
- "snap",
- "snap/snappb",
- "store",
- "version",
- "wal",
- "wal/walpb",
]
pruneopts = "UT"
revision = "2cf9e51d2a78003b164c2998886158e60ded1cbb"
version = "v3.3.11"
[[projects]]
- digest = "1:0ef770954bca104ee99b3b6b7f9b240605ac03517d9f98cbc1893daa03f3c038"
- name = "github.com/coreos/go-semver"
- packages = ["semver"]
- pruneopts = "UT"
- revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
- version = "v0.2.0"
-
-[[projects]]
- digest = "1:bf1ec2f5361b43bcc281b2017fb9c05de39a747725389e6e2e825ff8bb37adfc"
- name = "github.com/coreos/go-systemd"
- packages = [
- "daemon",
- "journal",
- "util",
- ]
- pruneopts = "UT"
- revision = "9002847aa1425fb6ac49077c0a630b3b67e0fbfd"
- version = "v18"
-
-[[projects]]
- digest = "1:129a158ba1ebf652f53b189d61dcf9fbfca8ac70b36bcb48a501200a21fb6086"
- name = "github.com/coreos/pkg"
- packages = [
- "capnslog",
- "dlopen",
- ]
- pruneopts = "UT"
- revision = "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1"
- version = "v4"
-
-[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
@@ -199,14 +78,6 @@
version = "v1.1.1"
[[projects]]
- digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
- name = "github.com/dgrijalva/jwt-go"
- packages = ["."]
- pruneopts = "UT"
- revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
- version = "v3.2.0"
-
-[[projects]]
digest = "1:1f0c7ab489b407a7f8f9ad16c25a504d28ab461517a971d341388a56156c1bd7"
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
@@ -231,14 +102,6 @@
version = "v1.1.0"
[[projects]]
- digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda"
- name = "github.com/ghodss/yaml"
- packages = ["."]
- pruneopts = "UT"
- revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
- version = "v1.0.0"
-
-[[projects]]
digest = "1:a9c85389dbd301c97a3499fe15a2b65b505b5f0cb0f1120dea59f1f3d6b11d96"
name = "github.com/gogo/protobuf"
packages = [
@@ -259,19 +122,10 @@
revision = "59788d5eb2591d3497ffb8fafed2f16fe00e7775"
[[projects]]
- branch = "master"
- digest = "1:3fb07f8e222402962fa190eb060608b34eddfb64562a18e2167df2de0ece85d8"
- name = "github.com/golang/groupcache"
- packages = ["lru"]
- pruneopts = "UT"
- revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa"
-
-[[projects]]
- digest = "1:1c31dba75f840ef15223c1d792a2e67e76ccd76ddc528fa23ff19320dde05463"
+ digest = "1:d0f384a62cb855617be65bf8fbad8b9133965955537b950b03e67993ee5fcae3"
name = "github.com/golang/protobuf"
packages = [
"descriptor",
- "jsonpb",
"proto",
"protoc-gen-go",
"protoc-gen-go/descriptor",
@@ -283,7 +137,6 @@
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
- "ptypes/struct",
"ptypes/timestamp",
]
pruneopts = "UT"
@@ -299,14 +152,6 @@
revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
[[projects]]
- branch = "master"
- digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df"
- name = "github.com/google/btree"
- packages = ["."]
- pruneopts = "UT"
- revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
-
-[[projects]]
digest = "1:236d7e1bdb50d8f68559af37dbcf9d142d56b431c9b2176d41e2a009b664cda8"
name = "github.com/google/uuid"
packages = ["."]
@@ -315,42 +160,6 @@
version = "v1.1.0"
[[projects]]
- digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d"
- name = "github.com/gorilla/websocket"
- packages = ["."]
- pruneopts = "UT"
- revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d"
- version = "v1.4.0"
-
-[[projects]]
- digest = "1:1168584a5881d371e96cb0e66ef6db71d7cef0856cc7f311490bc856627f8328"
- name = "github.com/grpc-ecosystem/go-grpc-middleware"
- packages = ["."]
- pruneopts = "UT"
- revision = "c250d6563d4d4c20252cd865923440e829844f4e"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:9b7a07ac7577787a8ecc1334cb9f34df1c76ed82a917d556c5713d3ab84fbc43"
- name = "github.com/grpc-ecosystem/go-grpc-prometheus"
- packages = ["."]
- pruneopts = "UT"
- revision = "c225b8c3b01faf2899099b768856a9e916e5087b"
- version = "v1.2.0"
-
-[[projects]]
- digest = "1:3cbc83a159e161c6cf3e64590ae19da29069ecd8d92f9400ab6fda15e93cbc5f"
- name = "github.com/grpc-ecosystem/grpc-gateway"
- packages = [
- "runtime",
- "runtime/internal",
- "utilities",
- ]
- pruneopts = "UT"
- revision = "aeab1d96e0f1368d243e2e5f526aa29d495517bb"
- version = "v1.5.1"
-
-[[projects]]
digest = "1:816a9a3902cc9d973a62475b829ab044cd46bbd7f064d317372ff868724cce89"
name = "github.com/gyuho/goraph"
packages = ["."]
@@ -389,38 +198,6 @@
revision = "19bbd39e421bdf3559d5025fb2c760f5ffa56233"
[[projects]]
- digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
- name = "github.com/inconshreveable/mousetrap"
- packages = ["."]
- pruneopts = "UT"
- revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
- version = "v1.0"
-
-[[projects]]
- digest = "1:75ab90ae3f5d876167e60f493beadfe66f0ed861a710f283fb06c86437a09538"
- name = "github.com/jonboulle/clockwork"
- packages = ["."]
- pruneopts = "UT"
- revision = "2eee05ed794112d45db504eb05aa693efd2b8b09"
- version = "v0.1.0"
-
-[[projects]]
- digest = "1:0a69a1c0db3591fcefb47f115b224592c8dfa4368b7ba9fae509d5e16cdc95c8"
- name = "github.com/konsorten/go-windows-terminal-sequences"
- packages = ["."]
- pruneopts = "UT"
- revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- pruneopts = "UT"
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
digest = "1:78bbb1ba5b7c3f2ed0ea1eab57bdd3859aec7e177811563edc41198a760b06af"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
@@ -436,14 +213,6 @@
revision = "5a380f224700b8a6c4eaad048804f5bff514cb35"
[[projects]]
- branch = "master"
- digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
- name = "github.com/petar/GoLLRB"
- packages = ["llrb"]
- pruneopts = "UT"
- revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
-
-[[projects]]
digest = "1:e39a5ee8fcbec487f8fc68863ef95f2b025e0739b0e4aa55558a2b4cf8f0ecf0"
name = "github.com/pierrec/lz4"
packages = [
@@ -463,51 +232,6 @@
version = "v1.0.0"
[[projects]]
- digest = "1:93a746f1060a8acbcf69344862b2ceced80f854170e1caae089b2834c5fbf7f4"
- name = "github.com/prometheus/client_golang"
- packages = [
- "prometheus",
- "prometheus/internal",
- "prometheus/promhttp",
- ]
- pruneopts = "UT"
- revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
- version = "v0.9.2"
-
-[[projects]]
- branch = "master"
- digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- pruneopts = "UT"
- revision = "56726106282f1985ea77d5305743db7231b0c0a8"
-
-[[projects]]
- branch = "master"
- digest = "1:ce62b400185bf6b16ef6088011b719e449f5c15c4adb6821589679f752c2788e"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model",
- ]
- pruneopts = "UT"
- revision = "2998b132700a7d019ff618c06a234b47c1f3f681"
-
-[[projects]]
- branch = "master"
- digest = "1:08eb8b60450efe841e37512d66ce366a87d187505d7c67b99307a6c1803483a2"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/util",
- "nfs",
- "xfs",
- ]
- pruneopts = "UT"
- revision = "b1a0a9a36d7453ba0f62578b99712f3a6c5f82d1"
-
-[[projects]]
branch = "master"
digest = "1:d38f81081a389f1466ec98192cf9115a82158854d6f01e1c23e2e7554b97db71"
name = "github.com/rcrowley/go-metrics"
@@ -516,38 +240,6 @@
revision = "3113b8401b8a98917cde58f8bbd42a1b1c03b1fd"
[[projects]]
- digest = "1:87c2e02fb01c27060ccc5ba7c5a407cc91147726f8f40b70cceeedbc52b1f3a8"
- name = "github.com/sirupsen/logrus"
- packages = ["."]
- pruneopts = "UT"
- revision = "e1e72e9de974bd926e5c56f83753fba2df402ce5"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:30e06e6d62a1d694e3cdbff29d8a9a96022e05a487d0c6eaf0ef898965ef28fb"
- name = "github.com/soheilhy/cmux"
- packages = ["."]
- pruneopts = "UT"
- revision = "e09e9389d85d8492d313d73d1469c029e710623f"
- version = "v0.1.4"
-
-[[projects]]
- digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939"
- name = "github.com/spf13/cobra"
- packages = ["."]
- pruneopts = "UT"
- revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
- version = "v0.0.3"
-
-[[projects]]
- digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
- name = "github.com/spf13/pflag"
- packages = ["."]
- pruneopts = "UT"
- revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
- version = "v1.0.3"
-
-[[projects]]
digest = "1:972c2427413d41a1e06ca4897e8528e5a1622894050e2f527b38ddf0f343f759"
name = "github.com/stretchr/testify"
packages = ["assert"]
@@ -556,30 +248,6 @@
version = "v1.3.0"
[[projects]]
- branch = "master"
- digest = "1:d4e58a50951cee7efe774acb441b5a3b75c99f9fd8cc6d58a3146b4de190e14d"
- name = "github.com/tmc/grpc-websocket-proxy"
- packages = ["wsproxy"]
- pruneopts = "UT"
- revision = "0ad062ec5ee553a48f6dbd280b7a1b5638e8a113"
-
-[[projects]]
- digest = "1:03aa6e485e528acb119fb32901cf99582c380225fc7d5a02758e08b180cb56c3"
- name = "github.com/ugorji/go"
- packages = ["codec"]
- pruneopts = "UT"
- revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:6dff6d02950c110d7d61da0c200eaff9da9f312101291b2d8c07235954eaa19d"
- name = "github.com/xiang90/probing"
- packages = ["."]
- pruneopts = "UT"
- revision = "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2"
- version = "0.0.1"
-
-[[projects]]
digest = "1:0324f6a07ddafbb36e9260c8ec331108744e29e1df856141e7df6d5c7ce59501"
name = "go.etcd.io/etcd"
packages = [
@@ -623,18 +291,6 @@
[[projects]]
branch = "master"
- digest = "1:427cdd2c12a3290ace873d2f9d3958d126c4d75596dbc09de05852da08a3dab5"
- name = "golang.org/x/crypto"
- packages = [
- "bcrypt",
- "blowfish",
- "ssh/terminal",
- ]
- pruneopts = "UT"
- revision = "ff983b9c42bc9fbf91556e191cc8efb585c16908"
-
-[[projects]]
- branch = "master"
digest = "1:293d03d721a8bebb909ff16dad088981a995204f67df318456f92ffe13e8e8cd"
name = "golang.org/x/net"
packages = [
@@ -651,12 +307,9 @@
[[projects]]
branch = "master"
- digest = "1:e5bf64d17781e7286a6da4803e459c4b01eca1ddc48fe6cd18db31f1b8b8ab2b"
+ digest = "1:d8e319d450f0e90df8dcf8c9934a878af800d0f4c692cb9657748f0cffff486e"
name = "golang.org/x/sys"
- packages = [
- "unix",
- "windows",
- ]
+ packages = ["unix"]
pruneopts = "UT"
revision = "2be51725563103c17124a318f1745b66f2347acb"
@@ -685,14 +338,6 @@
[[projects]]
branch = "master"
- digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
- name = "golang.org/x/time"
- packages = ["rate"]
- pruneopts = "UT"
- revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
-
-[[projects]]
- branch = "master"
digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
@@ -700,7 +345,7 @@
revision = "db91494dd46c1fdcbbde05e5ff5eb56df8f7d79a"
[[projects]]
- digest = "1:03af1505694005143ff6dc5d0e2802c8200ddb618b1d3f7201482f53798b99b4"
+ digest = "1:8f56475624fb72854d06ca16c2f7032e3cea14a63074e9c199ba8d46431c1127"
name = "google.golang.org/grpc"
packages = [
".",
@@ -715,7 +360,6 @@
"encoding",
"encoding/proto",
"grpclog",
- "health",
"health/grpc_health_v1",
"internal",
"internal/backoff",
@@ -749,21 +393,12 @@
revision = "03a43f93cd29dc549e6d9b11892795c206f9c38c"
version = "v1.20.1"
-[[projects]]
- digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = "UT"
- revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
- version = "v2.2.2"
-
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/bsm/sarama-cluster",
"github.com/cevaris/ordered_map",
- "github.com/coreos/etcd",
"github.com/gogo/protobuf/proto",
"github.com/golang-collections/go-datastructures/queue",
"github.com/golang/protobuf/descriptor",
diff --git a/Gopkg.toml b/Gopkg.toml
index 3b2efd2..bbc8cf0 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -52,14 +52,6 @@
name = "github.com/cevaris/ordered_map"
[[constraint]]
- name = "github.com/confluentinc/confluent-kafka-go"
- version = "0.11.6"
-
-[[constraint]]
- branch = "master"
- name = "github.com/coreswitch/log"
-
-[[constraint]]
name = "github.com/gogo/protobuf"
version = "1.2.0"
@@ -84,21 +76,9 @@
version = "1.4.0"
[[constraint]]
- name = "github.com/rs/zerolog"
- version = "1.11.0"
-
-[[constraint]]
- name = "github.com/sirupsen/logrus"
- version = "1.3.0"
-
-[[constraint]]
name = "github.com/stretchr/testify"
version = "1.3.0"
-[[override]]
- name = "github.com/coreos/etcd"
- version = "3.3.11"
-
[[constraint]]
name = "go.etcd.io/etcd"
version = "3.3.11"
@@ -111,10 +91,6 @@
branch = "master"
name = "golang.org/x/net"
-[[override]]
- branch = "master"
- name = "google.golang.org/genproto"
-
[[constraint]]
name = "google.golang.org/grpc"
version = "1.17.0"
diff --git a/docker/Dockerfile.rw_core b/docker/Dockerfile.rw_core
index 897b316..714ead7 100644
--- a/docker/Dockerfile.rw_core
+++ b/docker/Dockerfile.rw_core
@@ -8,7 +8,6 @@
# Install protobuf requirements
RUN git clone https://github.com/googleapis/googleapis.git /usr/local/include/googleapis
-RUN go get github.com/golang/protobuf/protoc-gen-go
RUN go get google.golang.org/genproto/googleapis/api/annotations
# Prepare directory structure
diff --git a/docker/Dockerfile.simulated_olt b/docker/Dockerfile.simulated_olt
index 4db18af..6513c0e 100644
--- a/docker/Dockerfile.simulated_olt
+++ b/docker/Dockerfile.simulated_olt
@@ -8,7 +8,6 @@
# Install protobuf requirements
RUN git clone https://github.com/googleapis/googleapis.git /usr/local/include/googleapis
-RUN go get github.com/golang/protobuf/protoc-gen-go
RUN go get google.golang.org/genproto/googleapis/api/annotations
# Prepare directory structure
diff --git a/docker/Dockerfile.simulated_onu b/docker/Dockerfile.simulated_onu
index dfd5a98..607ac6a 100644
--- a/docker/Dockerfile.simulated_onu
+++ b/docker/Dockerfile.simulated_onu
@@ -8,7 +8,6 @@
# Install protobuf requirements
RUN git clone https://github.com/googleapis/googleapis.git /usr/local/include/googleapis
-RUN go get github.com/golang/protobuf/protoc-gen-go
RUN go get google.golang.org/genproto/googleapis/api/annotations
# Prepare directory structure
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
deleted file mode 100644
index 339177b..0000000
--- a/vendor/github.com/beorn7/perks/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287..0000000
--- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
-8
-5
-26
-12
-5
-235
-13
-6
-28
-30
-3
-3
-3
-3
-5
-2
-33
-7
-2
-4
-7
-12
-14
-5
-8
-3
-10
-4
-5
-3
-6
-6
-209
-20
-3
-10
-14
-3
-4
-6
-8
-5
-11
-7
-3
-2
-3
-3
-212
-5
-222
-4
-10
-10
-5
-6
-3
-8
-3
-10
-254
-220
-2
-3
-5
-24
-5
-4
-222
-7
-3
-3
-223
-8
-15
-12
-14
-14
-3
-2
-2
-3
-13
-3
-11
-4
-4
-6
-5
-7
-13
-5
-3
-5
-2
-5
-3
-5
-2
-7
-15
-17
-14
-3
-6
-6
-3
-17
-5
-4
-7
-6
-4
-4
-8
-6
-8
-3
-9
-3
-6
-3
-4
-5
-3
-3
-660
-4
-6
-10
-3
-6
-3
-2
-5
-13
-2
-4
-4
-10
-4
-8
-4
-3
-7
-9
-9
-3
-10
-37
-3
-13
-4
-12
-3
-6
-10
-8
-5
-21
-2
-3
-8
-3
-2
-3
-3
-4
-12
-2
-4
-8
-8
-4
-3
-2
-20
-1
-6
-32
-2
-11
-6
-18
-3
-8
-11
-3
-212
-3
-4
-2
-6
-7
-12
-11
-3
-2
-16
-10
-6
-4
-6
-3
-2
-7
-3
-2
-2
-2
-2
-5
-6
-4
-3
-10
-3
-4
-6
-5
-3
-4
-4
-5
-6
-4
-3
-4
-4
-5
-7
-5
-5
-3
-2
-7
-2
-4
-12
-4
-5
-6
-2
-4
-4
-8
-4
-15
-13
-7
-16
-5
-3
-23
-5
-5
-7
-3
-2
-9
-8
-7
-5
-8
-11
-4
-10
-76
-4
-47
-4
-3
-2
-7
-4
-2
-3
-37
-10
-4
-2
-20
-5
-4
-4
-10
-10
-4
-3
-7
-23
-240
-7
-13
-5
-5
-3
-3
-2
-5
-4
-2
-8
-7
-19
-2
-23
-8
-7
-2
-5
-3
-8
-3
-8
-13
-5
-5
-5
-2
-3
-23
-4
-9
-8
-4
-3
-3
-5
-220
-2
-3
-4
-6
-14
-3
-53
-6
-2
-5
-18
-6
-3
-219
-6
-5
-2
-5
-3
-6
-5
-15
-4
-3
-17
-3
-2
-4
-7
-2
-3
-3
-4
-4
-3
-2
-664
-6
-3
-23
-5
-5
-16
-5
-8
-2
-4
-2
-24
-12
-3
-2
-3
-5
-8
-3
-5
-4
-3
-14
-3
-5
-8
-2
-3
-7
-9
-4
-2
-3
-6
-8
-4
-3
-4
-6
-5
-3
-3
-6
-3
-19
-4
-4
-6
-3
-6
-3
-5
-22
-5
-4
-4
-3
-8
-11
-4
-9
-7
-6
-13
-4
-4
-4
-6
-17
-9
-3
-3
-3
-4
-3
-221
-5
-11
-3
-4
-2
-12
-6
-3
-5
-7
-5
-7
-4
-9
-7
-14
-37
-19
-217
-16
-3
-5
-2
-2
-7
-19
-7
-6
-7
-4
-24
-5
-11
-4
-7
-7
-9
-13
-3
-4
-3
-6
-28
-4
-4
-5
-5
-2
-5
-6
-4
-4
-6
-10
-5
-4
-3
-2
-3
-3
-6
-5
-5
-4
-3
-2
-3
-7
-4
-6
-18
-16
-8
-16
-4
-5
-8
-6
-9
-13
-1545
-6
-215
-6
-5
-6
-3
-45
-31
-5
-2
-2
-4
-3
-3
-2
-5
-4
-3
-5
-7
-7
-4
-5
-8
-5
-4
-749
-2
-31
-9
-11
-2
-11
-5
-4
-4
-7
-9
-11
-4
-5
-4
-7
-3
-4
-6
-2
-15
-3
-4
-3
-4
-3
-5
-2
-13
-5
-5
-3
-3
-23
-4
-4
-5
-7
-4
-13
-2
-4
-3
-4
-2
-6
-2
-7
-3
-5
-5
-3
-29
-5
-4
-4
-3
-10
-2
-3
-79
-16
-6
-6
-7
-7
-3
-5
-5
-7
-4
-3
-7
-9
-5
-6
-5
-9
-6
-3
-6
-4
-17
-2
-10
-9
-3
-6
-2
-3
-21
-22
-5
-11
-4
-2
-17
-2
-224
-2
-14
-3
-4
-4
-2
-4
-4
-4
-4
-5
-3
-4
-4
-10
-2
-6
-3
-3
-5
-7
-2
-7
-5
-6
-3
-218
-2
-2
-5
-2
-6
-3
-5
-222
-14
-6
-33
-3
-2
-5
-3
-3
-3
-9
-5
-3
-3
-2
-7
-4
-3
-4
-3
-5
-6
-5
-26
-4
-13
-9
-7
-3
-221
-3
-3
-4
-4
-4
-4
-2
-18
-5
-3
-7
-9
-6
-8
-3
-10
-3
-11
-9
-5
-4
-17
-5
-5
-6
-6
-3
-2
-4
-12
-17
-6
-7
-218
-4
-2
-4
-10
-3
-5
-15
-3
-9
-4
-3
-3
-6
-29
-3
-3
-4
-5
-5
-3
-8
-5
-6
-6
-7
-5
-3
-5
-3
-29
-2
-31
-5
-15
-24
-16
-5
-207
-4
-3
-3
-2
-15
-4
-4
-13
-5
-5
-4
-6
-10
-2
-7
-8
-4
-6
-20
-5
-3
-4
-3
-12
-12
-5
-17
-7
-3
-3
-3
-6
-10
-3
-5
-25
-80
-4
-9
-3
-2
-11
-3
-3
-2
-3
-8
-7
-5
-5
-19
-5
-3
-3
-12
-11
-2
-6
-5
-5
-5
-3
-3
-3
-4
-209
-14
-3
-2
-5
-19
-4
-4
-3
-4
-14
-5
-6
-4
-13
-9
-7
-4
-7
-10
-2
-9
-5
-7
-2
-8
-4
-6
-5
-5
-222
-8
-7
-12
-5
-216
-3
-4
-4
-6
-3
-14
-8
-7
-13
-4
-3
-3
-3
-3
-17
-5
-4
-3
-33
-6
-6
-33
-7
-5
-3
-8
-7
-5
-2
-9
-4
-2
-233
-24
-7
-4
-8
-10
-3
-4
-15
-2
-16
-3
-3
-13
-12
-7
-5
-4
-207
-4
-2
-4
-27
-15
-2
-5
-2
-25
-6
-5
-5
-6
-13
-6
-18
-6
-4
-12
-225
-10
-7
-5
-2
-2
-11
-4
-14
-21
-8
-10
-3
-5
-4
-232
-2
-5
-5
-3
-7
-17
-11
-6
-6
-23
-4
-6
-3
-5
-4
-2
-17
-3
-6
-5
-8
-3
-2
-2
-14
-9
-4
-4
-2
-5
-5
-3
-7
-6
-12
-6
-10
-3
-6
-2
-2
-19
-5
-4
-4
-9
-2
-4
-13
-3
-5
-6
-3
-6
-5
-4
-9
-6
-3
-5
-7
-3
-6
-6
-4
-3
-10
-6
-3
-221
-3
-5
-3
-6
-4
-8
-5
-3
-6
-4
-4
-2
-54
-5
-6
-11
-3
-3
-4
-4
-4
-3
-7
-3
-11
-11
-7
-10
-6
-13
-223
-213
-15
-231
-7
-3
-7
-228
-2
-3
-4
-4
-5
-6
-7
-4
-13
-3
-4
-5
-3
-6
-4
-6
-7
-2
-4
-3
-4
-3
-3
-6
-3
-7
-3
-5
-18
-5
-6
-8
-10
-3
-3
-3
-2
-4
-2
-4
-4
-5
-6
-6
-4
-10
-13
-3
-12
-5
-12
-16
-8
-4
-19
-11
-2
-4
-5
-6
-8
-5
-6
-4
-18
-10
-4
-2
-216
-6
-6
-6
-2
-4
-12
-8
-3
-11
-5
-6
-14
-5
-3
-13
-4
-5
-4
-5
-3
-28
-6
-3
-7
-219
-3
-9
-7
-3
-10
-6
-3
-4
-19
-5
-7
-11
-6
-15
-19
-4
-13
-11
-3
-7
-5
-10
-2
-8
-11
-2
-6
-4
-6
-24
-6
-3
-3
-3
-3
-6
-18
-4
-11
-4
-2
-5
-10
-8
-3
-9
-5
-3
-4
-5
-6
-2
-5
-7
-4
-4
-14
-6
-4
-4
-5
-5
-7
-2
-4
-3
-7
-3
-3
-6
-4
-5
-4
-4
-4
-3
-3
-3
-3
-8
-14
-2
-3
-5
-3
-2
-4
-5
-3
-7
-3
-3
-18
-3
-4
-4
-5
-7
-3
-3
-3
-13
-5
-4
-8
-211
-5
-5
-3
-5
-2
-5
-4
-2
-655
-6
-3
-5
-11
-2
-5
-3
-12
-9
-15
-11
-5
-12
-217
-2
-6
-17
-3
-3
-207
-5
-5
-4
-5
-9
-3
-2
-8
-5
-4
-3
-2
-5
-12
-4
-14
-5
-4
-2
-13
-5
-8
-4
-225
-4
-3
-4
-5
-4
-3
-3
-6
-23
-9
-2
-6
-7
-233
-4
-4
-6
-18
-3
-4
-6
-3
-4
-4
-2
-3
-7
-4
-13
-227
-4
-3
-5
-4
-2
-12
-9
-17
-3
-7
-14
-6
-4
-5
-21
-4
-8
-9
-2
-9
-25
-16
-3
-6
-4
-7
-8
-5
-2
-3
-5
-4
-3
-3
-5
-3
-3
-3
-2
-3
-19
-2
-4
-3
-4
-2
-3
-4
-4
-2
-4
-3
-3
-3
-2
-6
-3
-17
-5
-6
-4
-3
-13
-5
-3
-3
-3
-4
-9
-4
-2
-14
-12
-4
-5
-24
-4
-3
-37
-12
-11
-21
-3
-4
-3
-13
-4
-2
-3
-15
-4
-11
-4
-4
-3
-8
-3
-4
-4
-12
-8
-5
-3
-3
-4
-2
-220
-3
-5
-223
-3
-3
-3
-10
-3
-15
-4
-241
-9
-7
-3
-6
-6
-23
-4
-13
-7
-3
-4
-7
-4
-9
-3
-3
-4
-10
-5
-5
-1
-5
-24
-2
-4
-5
-5
-6
-14
-3
-8
-2
-3
-5
-13
-13
-3
-5
-2
-3
-15
-3
-4
-2
-10
-4
-4
-4
-5
-5
-3
-5
-3
-4
-7
-4
-27
-3
-6
-4
-15
-3
-5
-6
-6
-5
-4
-8
-3
-9
-2
-6
-3
-4
-3
-7
-4
-18
-3
-11
-3
-3
-8
-9
-7
-24
-3
-219
-7
-10
-4
-5
-9
-12
-2
-5
-4
-4
-4
-3
-3
-19
-5
-8
-16
-8
-6
-22
-3
-23
-3
-242
-9
-4
-3
-3
-5
-7
-3
-3
-5
-8
-3
-7
-5
-14
-8
-10
-3
-4
-3
-7
-4
-6
-7
-4
-10
-4
-3
-11
-3
-7
-10
-3
-13
-6
-8
-12
-10
-5
-7
-9
-3
-4
-7
-7
-10
-8
-30
-9
-19
-4
-3
-19
-15
-4
-13
-3
-215
-223
-4
-7
-4
-8
-17
-16
-3
-7
-6
-5
-5
-4
-12
-3
-7
-4
-4
-13
-4
-5
-2
-5
-6
-5
-6
-6
-7
-10
-18
-23
-9
-3
-3
-6
-5
-2
-4
-2
-7
-3
-3
-2
-5
-5
-14
-10
-224
-6
-3
-4
-3
-7
-5
-9
-3
-6
-4
-2
-5
-11
-4
-3
-3
-2
-8
-4
-7
-4
-10
-7
-3
-3
-18
-18
-17
-3
-3
-3
-4
-5
-3
-3
-4
-12
-7
-3
-11
-13
-5
-4
-7
-13
-5
-4
-11
-3
-12
-3
-6
-4
-4
-21
-4
-6
-9
-5
-3
-10
-8
-4
-6
-4
-4
-6
-5
-4
-8
-6
-4
-6
-4
-4
-5
-9
-6
-3
-4
-2
-9
-3
-18
-2
-4
-3
-13
-3
-6
-6
-8
-7
-9
-3
-2
-16
-3
-4
-6
-3
-2
-33
-22
-14
-4
-9
-12
-4
-5
-6
-3
-23
-9
-4
-3
-5
-5
-3
-4
-5
-3
-5
-3
-10
-4
-5
-5
-8
-4
-4
-6
-8
-5
-4
-3
-4
-6
-3
-3
-3
-5
-9
-12
-6
-5
-9
-3
-5
-3
-2
-2
-2
-18
-3
-2
-21
-2
-5
-4
-6
-4
-5
-10
-3
-9
-3
-2
-10
-7
-3
-6
-6
-4
-4
-8
-12
-7
-3
-7
-3
-3
-9
-3
-4
-5
-4
-4
-5
-5
-10
-15
-4
-4
-14
-6
-227
-3
-14
-5
-216
-22
-5
-4
-2
-2
-6
-3
-4
-2
-9
-9
-4
-3
-28
-13
-11
-4
-5
-3
-3
-2
-3
-3
-5
-3
-4
-3
-5
-23
-26
-3
-4
-5
-6
-4
-6
-3
-5
-5
-3
-4
-3
-2
-2
-2
-7
-14
-3
-6
-7
-17
-2
-2
-15
-14
-16
-4
-6
-7
-13
-6
-4
-5
-6
-16
-3
-3
-28
-3
-6
-15
-3
-9
-2
-4
-6
-3
-3
-22
-4
-12
-6
-7
-2
-5
-4
-10
-3
-16
-6
-9
-2
-5
-12
-7
-5
-5
-5
-5
-2
-11
-9
-17
-4
-3
-11
-7
-3
-5
-15
-4
-3
-4
-211
-8
-7
-5
-4
-7
-6
-7
-6
-3
-6
-5
-6
-5
-3
-4
-4
-26
-4
-6
-10
-4
-4
-3
-2
-3
-3
-4
-5
-9
-3
-9
-4
-4
-5
-5
-8
-2
-4
-2
-3
-8
-4
-11
-19
-5
-8
-6
-3
-5
-6
-12
-3
-2
-4
-16
-12
-3
-4
-4
-8
-6
-5
-6
-6
-219
-8
-222
-6
-16
-3
-13
-19
-5
-4
-3
-11
-6
-10
-4
-7
-7
-12
-5
-3
-3
-5
-6
-10
-3
-8
-2
-5
-4
-7
-2
-4
-4
-2
-12
-9
-6
-4
-2
-40
-2
-4
-10
-4
-223
-4
-2
-20
-6
-7
-24
-5
-4
-5
-2
-20
-16
-6
-5
-13
-2
-3
-3
-19
-3
-2
-4
-5
-6
-7
-11
-12
-5
-6
-7
-7
-3
-5
-3
-5
-3
-14
-3
-4
-4
-2
-11
-1
-7
-3
-9
-6
-11
-12
-5
-8
-6
-221
-4
-2
-12
-4
-3
-15
-4
-5
-226
-7
-218
-7
-5
-4
-5
-18
-4
-5
-9
-4
-4
-2
-9
-18
-18
-9
-5
-6
-6
-3
-3
-7
-3
-5
-4
-4
-4
-12
-3
-6
-31
-5
-4
-7
-3
-6
-5
-6
-5
-11
-2
-2
-11
-11
-6
-7
-5
-8
-7
-10
-5
-23
-7
-4
-3
-5
-34
-2
-5
-23
-7
-3
-6
-8
-4
-4
-4
-2
-5
-3
-8
-5
-4
-8
-25
-2
-3
-17
-8
-3
-4
-8
-7
-3
-15
-6
-5
-7
-21
-9
-5
-6
-6
-5
-3
-2
-3
-10
-3
-6
-3
-14
-7
-4
-4
-8
-7
-8
-2
-6
-12
-4
-213
-6
-5
-21
-8
-2
-5
-23
-3
-11
-2
-3
-6
-25
-2
-3
-6
-7
-6
-6
-4
-4
-6
-3
-17
-9
-7
-6
-4
-3
-10
-7
-2
-3
-3
-3
-11
-8
-3
-7
-6
-4
-14
-36
-3
-4
-3
-3
-22
-13
-21
-4
-2
-7
-4
-4
-17
-15
-3
-7
-11
-2
-4
-7
-6
-209
-6
-3
-2
-2
-24
-4
-9
-4
-3
-3
-3
-29
-2
-2
-4
-3
-3
-5
-4
-6
-3
-3
-2
-4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index d7d14f8..0000000
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
- "math"
- "sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
- Value float64 `json:",string"`
- Width float64 `json:",string"`
- Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- return 2 * epsilon * r
- }
- return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
- ƒ := func(s *stream, r float64) float64 {
- return 2 * epsilon * (s.n - r)
- }
- return newStream(ƒ)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targetMap map[float64]float64) *Stream {
- // Convert map to slice to avoid slow iterations on a map.
- // ƒ is called on the hot path, so converting the map to a slice
- // beforehand results in significant CPU savings.
- targets := targetMapToSlice(targetMap)
-
- ƒ := func(s *stream, r float64) float64 {
- var m = math.MaxFloat64
- var f float64
- for _, t := range targets {
- if t.quantile*s.n <= r {
- f = (2 * t.epsilon * r) / t.quantile
- } else {
- f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
- }
- if f < m {
- m = f
- }
- }
- return m
- }
- return newStream(ƒ)
-}
-
-type target struct {
- quantile float64
- epsilon float64
-}
-
-func targetMapToSlice(targetMap map[float64]float64) []target {
- targets := make([]target, 0, len(targetMap))
-
- for quantile, epsilon := range targetMap {
- t := target{
- quantile: quantile,
- epsilon: epsilon,
- }
- targets = append(targets, t)
- }
-
- return targets
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
- *stream
- b Samples
- sorted bool
-}
-
-func newStream(ƒ invariant) *Stream {
- x := &stream{ƒ: ƒ}
- return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
- s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
- s.b = append(s.b, sample)
- s.sorted = false
- if len(s.b) == cap(s.b) {
- s.flush()
- }
-}
-
-// Query returns the computed qth percentiles value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
- if !s.flushed() {
- // Fast path when there hasn't been enough data for a flush;
- // this also yields better accuracy for small sets of data.
- l := len(s.b)
- if l == 0 {
- return 0
- }
- i := int(math.Ceil(float64(l) * q))
- if i > 0 {
- i -= 1
- }
- s.maybeSort()
- return s.b[i].Value
- }
- s.flush()
- return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying streams samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
- sort.Sort(samples)
- s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list reusing the samples buffer memory.
-func (s *Stream) Reset() {
- s.stream.reset()
- s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
- if !s.flushed() {
- return s.b
- }
- s.flush()
- return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
- return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
- s.maybeSort()
- s.stream.merge(s.b)
- s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
- if !s.sorted {
- s.sorted = true
- sort.Sort(s.b)
- }
-}
-
-func (s *Stream) flushed() bool {
- return len(s.stream.l) > 0
-}
-
-type stream struct {
- n float64
- l []Sample
- ƒ invariant
-}
-
-func (s *stream) reset() {
- s.l = s.l[:0]
- s.n = 0
-}
-
-func (s *stream) insert(v float64) {
- s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
- // TODO(beorn7): This tries to merge not only individual samples, but
- // whole summaries. The paper doesn't mention merging summaries at
- // all. Unittests show that the merging is inaccurate. Find out how to
- // do merges properly.
- var r float64
- i := 0
- for _, sample := range samples {
- for ; i < len(s.l); i++ {
- c := s.l[i]
- if c.Value > sample.Value {
- // Insert at position i.
- s.l = append(s.l, Sample{})
- copy(s.l[i+1:], s.l[i:])
- s.l[i] = Sample{
- sample.Value,
- sample.Width,
- math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
- // TODO(beorn7): How to calculate delta correctly?
- }
- i++
- goto inserted
- }
- r += c.Width
- }
- s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
- i++
- inserted:
- s.n += sample.Width
- r += sample.Width
- }
- s.compress()
-}
-
-func (s *stream) count() int {
- return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
- t := math.Ceil(q * s.n)
- t += math.Ceil(s.ƒ(s, t) / 2)
- p := s.l[0]
- var r float64
- for _, c := range s.l[1:] {
- r += p.Width
- if r+c.Width+c.Delta > t {
- return p.Value
- }
- p = c
- }
- return p.Value
-}
-
-func (s *stream) compress() {
- if len(s.l) < 2 {
- return
- }
- x := s.l[len(s.l)-1]
- xi := len(s.l) - 1
- r := s.n - 1 - x.Width
-
- for i := len(s.l) - 2; i >= 0; i-- {
- c := s.l[i]
- if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
- x.Width += c.Width
- s.l[xi] = x
- // Remove element at i.
- copy(s.l[i:], s.l[i+1:])
- s.l = s.l[:len(s.l)-1]
- xi -= 1
- } else {
- x = c
- xi = i
- }
- r -= c.Width
- }
-}
-
-func (s *stream) samples() Samples {
- samples := make(Samples, len(s.l))
- copy(samples, s.l)
- return samples
-}
diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore
deleted file mode 100644
index c7bd2b7..0000000
--- a/vendor/github.com/coreos/bbolt/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.prof
-*.test
-*.swp
-/bin/
diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE
deleted file mode 100644
index 004e77f..0000000
--- a/vendor/github.com/coreos/bbolt/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Ben Johnson
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile
deleted file mode 100644
index e035e63..0000000
--- a/vendor/github.com/coreos/bbolt/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-BRANCH=`git rev-parse --abbrev-ref HEAD`
-COMMIT=`git rev-parse --short HEAD`
-GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
-
-default: build
-
-race:
- @go test -v -race -test.run="TestSimulate_(100op|1000op)"
-
-# go get github.com/kisielk/errcheck
-errcheck:
- @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
-
-test:
- @go test -v -cover .
- @go test -v ./cmd/bolt
-
-.PHONY: fmt test
diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/coreos/bbolt/README.md
deleted file mode 100644
index 8523e33..0000000
--- a/vendor/github.com/coreos/bbolt/README.md
+++ /dev/null
@@ -1,852 +0,0 @@
-Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt) 
-====
-
-Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
-[LMDB project][lmdb]. The goal of the project is to provide a simple,
-fast, and reliable database for projects that don't require a full database
-server such as Postgres or MySQL.
-
-Since Bolt is meant to be used as such a low-level piece of functionality,
-simplicity is key. The API will be small and only focus on getting values
-and setting values. That's it.
-
-[hyc_symas]: https://twitter.com/hyc_symas
-[lmdb]: http://symas.com/mdb/
-
-## Project Status
-
-Bolt is stable and the API is fixed. Full unit test coverage and randomized
-black box testing are used to ensure database consistency and thread safety.
-Bolt is currently in high-load production environments serving databases as
-large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
-services every day.
-
-## Table of Contents
-
-- [Getting Started](#getting-started)
- - [Installing](#installing)
- - [Opening a database](#opening-a-database)
- - [Transactions](#transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Batch read-write transactions](#batch-read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- - [Using buckets](#using-buckets)
- - [Using key/value pairs](#using-keyvalue-pairs)
- - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- - [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Range scans](#range-scans)
- - [ForEach()](#foreach)
- - [Nested buckets](#nested-buckets)
- - [Database backups](#database-backups)
- - [Statistics](#statistics)
- - [Read-Only Mode](#read-only-mode)
- - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
-- [Resources](#resources)
-- [Comparison with other databases](#comparison-with-other-databases)
- - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- - [LevelDB, RocksDB](#leveldb-rocksdb)
- - [LMDB](#lmdb)
-- [Caveats & Limitations](#caveats--limitations)
-- [Reading the Source](#reading-the-source)
-- [Other Projects Using Bolt](#other-projects-using-bolt)
-
-## Getting Started
-
-### Installing
-
-To start using Bolt, install Go and run `go get`:
-
-```sh
-$ go get github.com/boltdb/bolt/...
-```
-
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
-
-
-### Opening a database
-
-The top-level object in Bolt is a `DB`. It is represented as a single file on
-your disk and represents a consistent snapshot of your data.
-
-To open your database, simply use the `bolt.Open()` function:
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/boltdb/bolt"
-)
-
-func main() {
- // Open the my.db data file in your current directory.
- // It will be created if it doesn't exist.
- db, err := bolt.Open("my.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- ...
-}
-```
-
-Please note that Bolt obtains a file lock on the data file so multiple processes
-cannot open the same database at the same time. Opening an already open Bolt
-database will cause it to hang until the other process closes it. To prevent
-an indefinite wait you can pass a timeout option to the `Open()` function:
-
-```go
-db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
-```
-
-
-### Transactions
-
-Bolt allows only one read-write transaction at a time but allows as many
-read-only transactions as you want at a time. Each transaction has a consistent
-view of the data as it existed when the transaction started.
-
-Individual transactions and all objects created from them (e.g. buckets, keys)
-are not thread safe. To work with data in multiple goroutines you must start
-a transaction for each one or use locking to ensure only one goroutine accesses
-a transaction at a time. Creating transaction from the `DB` is thread safe.
-
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
-
-#### Read-write transactions
-
-To start a read-write transaction, you can use the `DB.Update()` function:
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Inside the closure, you have a consistent view of the database. You commit the
-transaction by returning `nil` at the end. You can also rollback the transaction
-at any point by returning an error. All database operations are allowed inside
-a read-write transaction.
-
-Always check the return error as it will report any disk failures that can cause
-your transaction to not complete. If you return an error within your closure
-it will be passed through.
-
-
-#### Read-only transactions
-
-To start a read-only transaction, you can use the `DB.View()` function:
-
-```go
-err := db.View(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-You also get a consistent view of the database within this closure, however,
-no mutating operations are allowed within a read-only transaction. You can only
-retrieve buckets, retrieve values, and copy the database within a read-only
-transaction.
-
-
-#### Batch read-write transactions
-
-Each `DB.Update()` waits for disk to commit the writes. This overhead
-can be minimized by combining multiple updates with the `DB.Batch()`
-function:
-
-```go
-err := db.Batch(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Concurrent Batch calls are opportunistically combined into larger
-transactions. Batch is only useful when there are multiple goroutines
-calling it.
-
-The trade-off is that `Batch` can call the given
-function multiple times, if parts of the transaction fail. The
-function must be idempotent and side effects must take effect only
-after a successful return from `DB.Batch()`.
-
-For example: don't display messages from inside the function, instead
-set variables in the enclosing scope:
-
-```go
-var id uint64
-err := db.Batch(func(tx *bolt.Tx) error {
- // Find last key in bucket, decode as bigendian uint64, increment
- // by one, encode back to []byte, and add new key.
- ...
- id = newValue
- return nil
-})
-if err != nil {
- return ...
-}
-fmt.Println("Allocated ID %d", id)
-```
-
-
-#### Managing transactions manually
-
-The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-function. These helper functions will start the transaction, execute a function,
-and then safely close your transaction if an error is returned. This is the
-recommended way to use Bolt transactions.
-
-However, sometimes you may want to manually start and end your transactions.
-You can use the `Tx.Begin()` function directly but **please** be sure to close
-the transaction.
-
-```go
-// Start a writable transaction.
-tx, err := db.Begin(true)
-if err != nil {
- return err
-}
-defer tx.Rollback()
-
-// Use the transaction...
-_, err := tx.CreateBucket([]byte("MyBucket"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := tx.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.Begin()` is a boolean stating if the transaction
-should be writable.
-
-
-### Using buckets
-
-Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
-function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucket([]byte("MyBucket"))
- if err != nil {
- return fmt.Errorf("create bucket: %s", err)
- }
- return nil
-})
-```
-
-You can also create a bucket only if it doesn't exist by using the
-`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
-function for all your top-level buckets after you open your database so you can
-guarantee that they exist for future transactions.
-
-To delete a bucket, simply call the `Tx.DeleteBucket()` function.
-
-
-### Using key/value pairs
-
-To save a key/value pair to a bucket, use the `Bucket.Put()` function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- err := b.Put([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
-bucket. To retrieve this value, we can use the `Bucket.Get()` function:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- v := b.Get([]byte("answer"))
- fmt.Printf("The answer is: %s\n", v)
- return nil
-})
-```
-
-The `Get()` function does not return an error because its operation is
-guaranteed to work (unless there is some kind of system failure). If the key
-exists then it will return its byte slice value. If it doesn't exist then it
-will return `nil`. It's important to note that you can have a zero-length value
-set to a key which is different than the key not existing.
-
-Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-
-### Autoincrementing integer for the bucket
-By using the `NextSequence()` function, you can let Bolt determine a sequence
-which can be used as the unique identifier for your key/value pairs. See the
-example below.
-
-```go
-// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-func (s *Store) CreateUser(u *User) error {
- return s.db.Update(func(tx *bolt.Tx) error {
- // Retrieve the users bucket.
- // This should be created when the DB is first opened.
- b := tx.Bucket([]byte("users"))
-
- // Generate ID for the user.
- // This returns an error only if the Tx is closed or not writeable.
- // That can't happen in an Update() call so I ignore the error check.
- id, _ := b.NextSequence()
- u.ID = int(id)
-
- // Marshal user data into bytes.
- buf, err := json.Marshal(u)
- if err != nil {
- return err
- }
-
- // Persist bytes to users bucket.
- return b.Put(itob(u.ID), buf)
- })
-}
-
-// itob returns an 8-byte big endian representation of v.
-func itob(v int) []byte {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(v))
- return b
-}
-
-type User struct {
- ID int
- ...
-}
-```
-
-### Iterating over keys
-
-Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
-iteration over these keys extremely fast. To iterate over keys we'll use a
-`Cursor`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte("MyBucket"))
-
- c := b.Cursor()
-
- for k, v := c.First(); k != nil; k, v = c.Next() {
- fmt.Printf("key=%s, value=%s\n", k, v)
- }
-
- return nil
-})
-```
-
-The cursor allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-The following functions are available on the cursor:
-
-```
-First() Move to the first key.
-Last() Move to the last key.
-Seek() Move to a specific key.
-Next() Move to the next key.
-Prev() Move to the previous key.
-```
-
-Each of those functions has a return signature of `(key []byte, value []byte)`.
-When you have iterated to the end of the cursor then `Next()` will return a
-`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
-before calling `Next()` or `Prev()`. If you do not seek to a position then
-these functions will return a `nil` key.
-
-During iteration, if the key is non-`nil` but the value is `nil`, that means
-the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
-access the sub-bucket.
-
-
-#### Prefix scans
-
-To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- c := tx.Bucket([]byte("MyBucket")).Cursor()
-
- prefix := []byte("1234")
- for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
- fmt.Printf("key=%s, value=%s\n", k, v)
- }
-
- return nil
-})
-```
-
-#### Range scans
-
-Another common use case is scanning over a range such as a time range. If you
-use a sortable time encoding such as RFC3339 then you can query a specific
-date range like this:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume our events bucket exists and has RFC3339 encoded time keys.
- c := tx.Bucket([]byte("Events")).Cursor()
-
- // Our time range spans the 90's decade.
- min := []byte("1990-01-01T00:00:00Z")
- max := []byte("2000-01-01T00:00:00Z")
-
- // Iterate over the 90's.
- for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
- fmt.Printf("%s: %s\n", k, v)
- }
-
- return nil
-})
-```
-
-Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
-
-
-#### ForEach()
-
-You can also use the function `ForEach()` if you know you'll be iterating over
-all the keys in a bucket:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte("MyBucket"))
-
- b.ForEach(func(k, v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- return nil
-})
-```
-
-
-### Nested buckets
-
-You can also store a bucket in a key to create nested buckets. The API is the
-same as the bucket management API on the `DB` object:
-
-```go
-func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
-func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
-func (*Bucket) DeleteBucket(key []byte) error
-```
-
-
-### Database backups
-
-Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
-function to write a consistent view of the database to a writer. If you call
-this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes.
-
-By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to backup over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
- err := db.View(func(tx *bolt.Tx) error {
- w.Header().Set("Content-Type", "application/octet-stream")
- w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
- w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
- _, err := tx.WriteTo(w)
- return err
- })
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-```
-
-Then you can backup using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to backup to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
- // Grab the initial stats.
- prev := db.Stats()
-
- for {
- // Wait for 10s.
- time.Sleep(10 * time.Second)
-
- // Grab the current stats and diff them.
- stats := db.Stats()
- diff := stats.Sub(&prev)
-
- // Encode stats to JSON and print to STDERR.
- json.NewEncoder(os.Stderr).Encode(diff)
-
- // Save stats for the next loop.
- prev = stats
- }
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
- log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with a initializing
-constructor that takes in a filepath where the database file will be stored.
-Neither Android nor iOS require extra permissions or cleanup from using this method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
- db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
-
- return &BoltDB{db}
-}
-
-type BoltDB struct {
- db *bolt.DB
- ...
-}
-
-func (b *BoltDB) Path() string {
- return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
- b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
-
-To initialize this struct from the native language (both platforms now sync
-their local storage to the cloud. These snippets disable that functionality for the
-database file):
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
- path = getNoBackupFilesDir().getAbsolutePath();
-} else{
- path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
-```
-
-#### iOS
-
-```objc
-- (void)demo {
- NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
- NSUserDomainMask,
- YES) objectAtIndex:0];
- GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
- [self addSkipBackupAttributeToItemAtPath:demo.path];
- //Some DB Logic would go here
- [demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
-{
- NSURL* URL= [NSURL fileURLWithPath: filePathString];
- assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
-
- NSError *error = nil;
- BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
- forKey: NSURLIsExcludedFromBackupKey error: &error];
- if(!success){
- NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
- }
- return success;
-}
-
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application, however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write-ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/value pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
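-
-As an illustration, a compare-and-swap in Bolt is simply a read-modify-write
-inside a single `Update` transaction, since writers are fully serialized (the
-bucket, key, and values below are illustrative):
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
-    b := tx.Bucket([]byte("locks"))
-    if b == nil {
-        return bolt.ErrBucketNotFound
-    }
-    // Only swap if the current value still matches what we expect.
-    if !bytes.Equal(b.Get([]byte("owner")), []byte("node-a")) {
-        return errors.New("owner changed, aborting swap")
-    }
-    return b.Put([]byte("owner"), []byte("node-b"))
-})
-```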
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read-intensive workloads. Sequential write performance is
- also fast but random writes can be slow. You can use `DB.Batch()` or add a
- write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
- SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long running read transactions. Bolt uses copy-on-write so
- old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
-  transaction has been committed or rolled back, the memory they point to can be
-  reused by a new page or unmapped from virtual memory, and accessing it will
-  cause an `unexpected fault address` panic. Copy any data you need to keep past
-  the transaction (see the sketch after this list).
-
-* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
- buckets that have random inserts will cause your database to have very poor
- page utilization.
-
-* Use larger buckets in general. Smaller buckets cause poor page utilization
- once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
- page will not split until the transaction is committed. Randomly inserting
- more than 100,000 key/value pairs into a single new bucket in a single
- transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
- caching of the data. Typically, the OS will cache as much of the file as it
- can in memory and will release memory as needed to other processes. This means
- that Bolt can show very high memory usage when working with large databases.
- However, this is expected and the OS will release memory as needed. Bolt can
- handle databases much larger than the available physical RAM, provided its
-  memory-map fits in the process virtual address space. This can be a problem
-  on 32-bit systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
- will be endian specific. This means that you cannot copy a Bolt file from a
- little endian machine to a big endian machine and have it work. For most
- users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
- and return free pages back to the disk. Instead, Bolt maintains a free list
- of unused pages within its data file. These free pages can be reused by later
- transactions. This works well for many use cases as databases generally tend
- to grow. However, it's important to note that deleting large chunks of data
- will not allow you to reclaim that space on disk.
-
- For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
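-
-As a small illustration of the byte-slice caveat above, data that must outlive
-its transaction should be copied before the transaction ends (bucket and key
-names are illustrative):
-
-```go
-var out []byte
-err := db.View(func(tx *bolt.Tx) error {
-    b := tx.Bucket([]byte("widgets"))
-    if b == nil {
-        return nil
-    }
-    if v := b.Get([]byte("foo")); v != nil {
-        out = append([]byte(nil), v...) // copy; v is only valid inside this transaction
-    }
-    return nil
-})
-// out remains valid here, after the transaction has closed.
-```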
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
- creating the database if it doesn't exist, obtaining an exclusive lock on the
- file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
- value of the `writable` argument. This requires briefly obtaining the "meta"
- lock to keep track of open transactions. Only one read-write transaction can
- exist at a time so the "rwlock" is acquired during the life of a read-write
- transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
- arguments, a cursor is used to traverse the B+tree to the page and position
-  where the key & value will be written. Once the position is found, the bucket
- materializes the underlying page and the page's parent pages into memory as
- "nodes". These nodes are where mutations occur during read-write transactions.
- These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
- to move to the page & position of a key/value pair. During a read-only
- transaction, the key and value data is returned as a direct reference to the
- underlying mmap file so there's no allocation overhead. For read-write
- transactions, this data may reference the mmap file or one of the in-memory
- node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
- or in-memory nodes. It can seek to a specific key, move to the first or last
- value, or it can move forward or backward. The cursor handles the movement up
- and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
- into pages to be written to disk. Writing to disk then occurs in two phases.
- First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
- new meta page with an incremented transaction ID is written and another
-  `fsync()` occurs. This two-phase write ensures that partially written data
- pages are ignored in the event of a crash since the meta page pointing to them
- is never written. Partially written meta pages are invalidated because they
- are written with a checksum.
-
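-For orientation, here is a minimal round trip that touches each of these entry
-points (the file, bucket, and key names are illustrative):
-
-```go
-db, err := bolt.Open("walkthrough.db", 0600, nil) // Open(): lock file, read meta pages, mmap
-if err != nil {
-    log.Fatal(err)
-}
-defer db.Close()
-
-err = db.Update(func(tx *bolt.Tx) error { // DB.Begin(true) under the hood
-    b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
-    if err != nil {
-        return err
-    }
-    return b.Put([]byte("foo"), []byte("bar")) // Bucket.Put(): cursor seek + node mutation
-}) // Tx.Commit(): dirty pages, then a new meta page, each followed by fsync()
-if err != nil {
-    log.Fatal(err)
-}
-
-_ = db.View(func(tx *bolt.Tx) error { // read-only transaction via DB.Begin(false)
-    v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) // Bucket.Get(): mmap-backed value
-    fmt.Printf("foo=%s\n", v)
-    return nil
-})
-```
-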
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Adds Bolt as an optional storage engine, tested against Basho-tuned leveldb.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent and offers a JSON-over-HTTP API, ISO 8601 duration notation, and dependent jobs.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
- backed by boltdb.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
- simple tx and key scans.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
-
-If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/coreos/bbolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml
deleted file mode 100644
index 6e26e94..0000000
--- a/vendor/github.com/coreos/bbolt/appveyor.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\boltdb\bolt
-
-environment:
- GOPATH: c:\gopath
-
-install:
- - echo %PATH%
- - echo %GOPATH%
- - go version
- - go env
- - go get -v -t ./...
-
-build_script:
- - go test -v ./...
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go
deleted file mode 100644
index e659bfb..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_386.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go
deleted file mode 100644
index cca6b7e..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_amd64.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go
deleted file mode 100644
index e659bfb..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_arm.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go
deleted file mode 100644
index 6d23093..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_arm64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build arm64
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go
deleted file mode 100644
index 2b67666..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_linux.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package bolt
-
-import (
- "syscall"
-)
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return syscall.Fdatasync(int(db.file.Fd()))
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go
deleted file mode 100644
index 7058c3d..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_openbsd.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package bolt
-
-import (
- "syscall"
- "unsafe"
-)
-
-const (
- msAsync = 1 << iota // perform asynchronous writes
- msSync // perform synchronous writes
- msInvalidate // invalidate cached data
-)
-
-func msync(db *DB) error {
- _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
- if errno != 0 {
- return errno
- }
- return nil
-}
-
-func fdatasync(db *DB) error {
- if db.data != nil {
- return msync(db)
- }
- return db.file.Sync()
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go
deleted file mode 100644
index 645ddc3..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_ppc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go
deleted file mode 100644
index 2dc6be0..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_ppc64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc64
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
deleted file mode 100644
index 8351e12..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc64le
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go
deleted file mode 100644
index f4dd26b..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_s390x.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build s390x
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go
deleted file mode 100644
index cad62dd..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_unix.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// +build !windows,!plan9,!solaris
-
-package bolt
-
-import (
- "fmt"
- "os"
- "syscall"
- "time"
- "unsafe"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
- var t time.Time
- for {
- // If we're beyond our timeout then return an error.
- // This can only occur after we've attempted a flock once.
- if t.IsZero() {
- t = time.Now()
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
- flag := syscall.LOCK_SH
- if exclusive {
- flag = syscall.LOCK_EX
- }
-
- // Otherwise attempt to obtain an exclusive lock.
- err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
- if err == nil {
- return nil
- } else if err != syscall.EWOULDBLOCK {
- return err
- }
-
- // Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- if err := madvise(b, syscall.MADV_RANDOM); err != nil {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := syscall.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- err = e1
- }
- return
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
deleted file mode 100644
index 307bf2b..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "os"
- "syscall"
- "time"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
- var t time.Time
- for {
- // If we're beyond our timeout then return an error.
- // This can only occur after we've attempted a flock once.
- if t.IsZero() {
- t = time.Now()
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Pid = 0
- lock.Whence = 0
- lock.Pid = 0
- if exclusive {
- lock.Type = syscall.F_WRLCK
- } else {
- lock.Type = syscall.F_RDLCK
- }
- err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
- if err == nil {
- return nil
- } else if err != syscall.EAGAIN {
- return err
- }
-
- // Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Type = syscall.F_UNLCK
- lock.Whence = 0
- return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := unix.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go
deleted file mode 100644
index d538e6a..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_windows.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "os"
- "syscall"
- "time"
- "unsafe"
-)
-
-// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
- procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
- lockExt = ".lock"
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
- flagLockExclusive = 2
- flagLockFailImmediately = 1
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
- errLockViolation syscall.Errno = 0x21
-)
-
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r == 0 {
- return err
- }
- return nil
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return db.file.Sync()
-}
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
- // Create a separate lock file on windows because a process
- // cannot share an exclusive lock on the same file. This is
- // needed during Tx.WriteTo().
- f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
- if err != nil {
- return err
- }
- db.lockfile = f
-
- var t time.Time
- for {
- // If we're beyond our timeout then return an error.
- // This can only occur after we've attempted a flock once.
- if t.IsZero() {
- t = time.Now()
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
-
- var flag uint32 = flagLockFailImmediately
- if exclusive {
- flag |= flagLockExclusive
- }
-
- err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
- if err == nil {
- return nil
- } else if err != errLockViolation {
- return err
- }
-
- // Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
- db.lockfile.Close()
- os.Remove(db.path+lockExt)
- return err
-}
-
-// mmap memory maps a DB's data file.
-// Based on: https://github.com/edsrzf/mmap-go
-func mmap(db *DB, sz int) error {
- if !db.readOnly {
- // Truncate the database to the size of the mmap.
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("truncate: %s", err)
- }
- }
-
- // Open a file mapping handle.
- sizelo := uint32(sz >> 32)
- sizehi := uint32(sz) & 0xffffffff
- h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
- if h == 0 {
- return os.NewSyscallError("CreateFileMapping", errno)
- }
-
- // Create the memory map.
- addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
- if addr == 0 {
- return os.NewSyscallError("MapViewOfFile", errno)
- }
-
- // Close mapping handle.
- if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
- return os.NewSyscallError("CloseHandle", err)
- }
-
- // Convert to a byte array.
- db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
- db.datasz = sz
-
- return nil
-}
-
-// munmap unmaps a pointer from a file.
-// Based on: https://github.com/edsrzf/mmap-go
-func munmap(db *DB) error {
- if db.data == nil {
- return nil
- }
-
- addr := (uintptr)(unsafe.Pointer(&db.data[0]))
- if err := syscall.UnmapViewOfFile(addr); err != nil {
- return os.NewSyscallError("UnmapViewOfFile", err)
- }
- return nil
-}
diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go
deleted file mode 100644
index f504425..0000000
--- a/vendor/github.com/coreos/bbolt/boltsync_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows,!plan9,!linux,!openbsd
-
-package bolt
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return db.file.Sync()
-}
diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go
deleted file mode 100644
index d2f8c52..0000000
--- a/vendor/github.com/coreos/bbolt/bucket.go
+++ /dev/null
@@ -1,748 +0,0 @@
-package bolt
-
-import (
- "bytes"
- "fmt"
- "unsafe"
-)
-
-const (
- // MaxKeySize is the maximum length of a key, in bytes.
- MaxKeySize = 32768
-
- // MaxValueSize is the maximum length of a value, in bytes.
- MaxValueSize = (1 << 31) - 2
-)
-
-const (
- maxUint = ^uint(0)
- minUint = 0
- maxInt = int(^uint(0) >> 1)
- minInt = -maxInt - 1
-)
-
-const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
-
-const (
- minFillPercent = 0.1
- maxFillPercent = 1.0
-)
-
-// DefaultFillPercent is the percentage that split pages are filled.
-// This value can be changed by setting Bucket.FillPercent.
-const DefaultFillPercent = 0.5
-
-// Bucket represents a collection of key/value pairs inside the database.
-type Bucket struct {
- *bucket
- tx *Tx // the associated transaction
- buckets map[string]*Bucket // subbucket cache
- page *page // inline page reference
- rootNode *node // materialized node for the root page.
- nodes map[pgid]*node // node cache
-
- // Sets the threshold for filling nodes when they split. By default,
- // the bucket will fill to 50% but it can be useful to increase this
- // amount if you know that your write workloads are mostly append-only.
- //
- // This is non-persisted across transactions so it must be set in every Tx.
- FillPercent float64
-}
-
-// bucket represents the on-file representation of a bucket.
-// This is stored as the "value" of a bucket key. If the bucket is small enough,
-// then its root page can be stored inline in the "value", after the bucket
-// header. In the case of inline buckets, the "root" will be 0.
-type bucket struct {
- root pgid // page id of the bucket's root-level page
- sequence uint64 // monotonically incrementing, used by NextSequence()
-}
-
-// newBucket returns a new bucket associated with a transaction.
-func newBucket(tx *Tx) Bucket {
- var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
- if tx.writable {
- b.buckets = make(map[string]*Bucket)
- b.nodes = make(map[pgid]*node)
- }
- return b
-}
-
-// Tx returns the tx of the bucket.
-func (b *Bucket) Tx() *Tx {
- return b.tx
-}
-
-// Root returns the root of the bucket.
-func (b *Bucket) Root() pgid {
- return b.root
-}
-
-// Writable returns whether the bucket is writable.
-func (b *Bucket) Writable() bool {
- return b.tx.writable
-}
-
-// Cursor creates a cursor associated with the bucket.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (b *Bucket) Cursor() *Cursor {
- // Update transaction statistics.
- b.tx.stats.CursorCount++
-
- // Allocate and return a cursor.
- return &Cursor{
- bucket: b,
- stack: make([]elemRef, 0),
- }
-}
-
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) Bucket(name []byte) *Bucket {
- if b.buckets != nil {
- if child := b.buckets[string(name)]; child != nil {
- return child
- }
- }
-
- // Move cursor to key.
- c := b.Cursor()
- k, v, flags := c.seek(name)
-
- // Return nil if the key doesn't exist or it is not a bucket.
- if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
- return nil
- }
-
- // Otherwise create a bucket and cache it.
- var child = b.openBucket(v)
- if b.buckets != nil {
- b.buckets[string(name)] = child
- }
-
- return child
-}
-
-// Helper method that re-interprets a sub-bucket value
-// from a parent into a Bucket
-func (b *Bucket) openBucket(value []byte) *Bucket {
- var child = newBucket(b.tx)
-
- // If this is a writable transaction then we need to copy the bucket entry.
- // Read-only transactions can point directly at the mmap entry.
- if b.tx.writable {
- child.bucket = &bucket{}
- *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
- } else {
- child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
- }
-
- // Save a reference to the inline page if the bucket is inline.
- if child.root == 0 {
- child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
- }
-
- return &child
-}
-
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
- if b.tx.db == nil {
- return nil, ErrTxClosed
- } else if !b.tx.writable {
- return nil, ErrTxNotWritable
- } else if len(key) == 0 {
- return nil, ErrBucketNameRequired
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if there is an existing key.
- if bytes.Equal(key, k) {
- if (flags & bucketLeafFlag) != 0 {
- return nil, ErrBucketExists
- } else {
- return nil, ErrIncompatibleValue
- }
- }
-
- // Create empty, inline bucket.
- var bucket = Bucket{
- bucket: &bucket{},
- rootNode: &node{isLeaf: true},
- FillPercent: DefaultFillPercent,
- }
- var value = bucket.write()
-
- // Insert into node.
- key = cloneBytes(key)
- c.node().put(key, key, value, 0, bucketLeafFlag)
-
- // Since subbuckets are not allowed on inline buckets, we need to
- // dereference the inline page, if it exists. This will cause the bucket
- // to be treated as a regular, non-inline bucket for the rest of the tx.
- b.page = nil
-
- return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
- child, err := b.CreateBucket(key)
- if err == ErrBucketExists {
- return b.Bucket(key), nil
- } else if err != nil {
- return nil, err
- }
- return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if bucket doesn't exist or is not a bucket.
- if !bytes.Equal(key, k) {
- return ErrBucketNotFound
- } else if (flags & bucketLeafFlag) == 0 {
- return ErrIncompatibleValue
- }
-
- // Recursively delete all child buckets.
- child := b.Bucket(key)
- err := child.ForEach(func(k, v []byte) error {
- if v == nil {
- if err := child.DeleteBucket(k); err != nil {
- return fmt.Errorf("delete bucket: %s", err)
- }
- }
- return nil
- })
- if err != nil {
- return err
- }
-
- // Remove cached copy.
- delete(b.buckets, string(key))
-
- // Release all bucket pages to freelist.
- child.nodes = nil
- child.rootNode = nil
- child.free()
-
- // Delete the node if we have a matching key.
- c.node().del(key)
-
- return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
- k, v, flags := b.Cursor().seek(key)
-
- // Return nil if this is a bucket.
- if (flags & bucketLeafFlag) != 0 {
- return nil
- }
-
- // If our target node isn't the same key as what's passed in then return nil.
- if !bytes.Equal(key, k) {
- return nil
- }
- return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exist then its previous value will be overwritten.
-// Supplied value must remain valid for the life of the transaction.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- } else if len(key) == 0 {
- return ErrKeyRequired
- } else if len(key) > MaxKeySize {
- return ErrKeyTooLarge
- } else if int64(len(value)) > MaxValueSize {
- return ErrValueTooLarge
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if there is an existing key with a bucket value.
- if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
-
- // Insert into node.
- key = cloneBytes(key)
- c.node().put(key, key, value, 0, 0)
-
- return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- _, _, flags := c.seek(key)
-
- // Return an error if there is already existing bucket value.
- if (flags & bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
-
- // Delete the node if we have a matching key.
- c.node().del(key)
-
- return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
- if b.tx.db == nil {
- return 0, ErrTxClosed
- } else if !b.Writable() {
- return 0, ErrTxNotWritable
- }
-
- // Materialize the root node if it hasn't been already so that the
- // bucket will be saved during commit.
- if b.rootNode == nil {
- _ = b.node(b.root, nil)
- }
-
- // Increment and return the sequence.
- b.bucket.sequence++
- return b.bucket.sequence, nil
-}
-
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller. The provided function must not modify
-// the bucket; this will result in undefined behavior.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
- if b.tx.db == nil {
- return ErrTxClosed
- }
- c := b.Cursor()
- for k, v := c.First(); k != nil; k, v = c.Next() {
- if err := fn(k, v); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Stat returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
- var s, subStats BucketStats
- pageSize := b.tx.db.pageSize
- s.BucketN += 1
- if b.root == 0 {
- s.InlineBucketN += 1
- }
- b.forEachPage(func(p *page, depth int) {
- if (p.flags & leafPageFlag) != 0 {
- s.KeyN += int(p.count)
-
- // used totals the used bytes for the page
- used := pageHeaderSize
-
- if p.count != 0 {
- // If page has any elements, add all element headers.
- used += leafPageElementSize * int(p.count-1)
-
- // Add all element key, value sizes.
- // The computation takes advantage of the fact that the position
- // of the last element's key/value equals to the total of the sizes
- // of all previous elements' keys and values.
- // It also includes the last element's header.
- lastElement := p.leafPageElement(p.count - 1)
- used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
- }
-
- if b.root == 0 {
- // For inlined bucket just update the inline stats
- s.InlineBucketInuse += used
- } else {
- // For non-inlined bucket update all the leaf stats
- s.LeafPageN++
- s.LeafInuse += used
- s.LeafOverflowN += int(p.overflow)
-
- // Collect stats from sub-buckets.
- // Do that by iterating over all element headers
- // looking for the ones with the bucketLeafFlag.
- for i := uint16(0); i < p.count; i++ {
- e := p.leafPageElement(i)
- if (e.flags & bucketLeafFlag) != 0 {
- // For any bucket element, open the element value
- // and recursively call Stats on the contained bucket.
- subStats.Add(b.openBucket(e.value()).Stats())
- }
- }
- }
- } else if (p.flags & branchPageFlag) != 0 {
- s.BranchPageN++
- lastElement := p.branchPageElement(p.count - 1)
-
- // used totals the used bytes for the page
- // Add header and all element headers.
- used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
-
- // Add size of all keys and values.
- // Again, use the fact that last element's position equals to
- // the total of key, value sizes of all previous elements.
- used += int(lastElement.pos + lastElement.ksize)
- s.BranchInuse += used
- s.BranchOverflowN += int(p.overflow)
- }
-
- // Keep track of maximum page depth.
- if depth+1 > s.Depth {
- s.Depth = (depth + 1)
- }
- })
-
- // Alloc stats can be computed from page counts and pageSize.
- s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
- s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
- // Add the max depth of sub-buckets to get total nested depth.
- s.Depth += subStats.Depth
- // Add the stats for all sub-buckets
- s.Add(subStats)
- return s
-}
-
-// forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
- // If we have an inline page then just use that.
- if b.page != nil {
- fn(b.page, 0)
- return
- }
-
- // Otherwise traverse the page hierarchy.
- b.tx.forEachPage(b.root, 0, fn)
-}
-
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
-func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
- // If we have an inline page or root node then just use that.
- if b.page != nil {
- fn(b.page, nil, 0)
- return
- }
- b._forEachPageNode(b.root, 0, fn)
-}
-
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
- var p, n = b.pageNode(pgid)
-
- // Execute function.
- fn(p, n, depth)
-
- // Recursively loop over children.
- if p != nil {
- if (p.flags & branchPageFlag) != 0 {
- for i := 0; i < int(p.count); i++ {
- elem := p.branchPageElement(uint16(i))
- b._forEachPageNode(elem.pgid, depth+1, fn)
- }
- }
- } else {
- if !n.isLeaf {
- for _, inode := range n.inodes {
- b._forEachPageNode(inode.pgid, depth+1, fn)
- }
- }
- }
-}
-
-// spill writes all the nodes for this bucket to dirty pages.
-func (b *Bucket) spill() error {
- // Spill all child buckets first.
- for name, child := range b.buckets {
- // If the child bucket is small enough and it has no child buckets then
- // write it inline into the parent bucket's page. Otherwise spill it
- // like a normal bucket and make the parent value a pointer to the page.
- var value []byte
- if child.inlineable() {
- child.free()
- value = child.write()
- } else {
- if err := child.spill(); err != nil {
- return err
- }
-
- // Update the child bucket header in this bucket.
- value = make([]byte, unsafe.Sizeof(bucket{}))
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
- *bucket = *child.bucket
- }
-
- // Skip writing the bucket if there are no materialized nodes.
- if child.rootNode == nil {
- continue
- }
-
- // Update parent node.
- var c = b.Cursor()
- k, _, flags := c.seek([]byte(name))
- if !bytes.Equal([]byte(name), k) {
- panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
- }
- if flags&bucketLeafFlag == 0 {
- panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
- }
- c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
- }
-
- // Ignore if there's not a materialized root node.
- if b.rootNode == nil {
- return nil
- }
-
- // Spill nodes.
- if err := b.rootNode.spill(); err != nil {
- return err
- }
- b.rootNode = b.rootNode.root()
-
- // Update the root node for this bucket.
- if b.rootNode.pgid >= b.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
- }
- b.root = b.rootNode.pgid
-
- return nil
-}
-
-// inlineable returns true if a bucket is small enough to be written inline
-// and if it contains no subbuckets. Otherwise returns false.
-func (b *Bucket) inlineable() bool {
- var n = b.rootNode
-
- // Bucket must only contain a single leaf node.
- if n == nil || !n.isLeaf {
- return false
- }
-
- // Bucket is not inlineable if it contains subbuckets or if it goes beyond
- // our threshold for inline bucket size.
- var size = pageHeaderSize
- for _, inode := range n.inodes {
- size += leafPageElementSize + len(inode.key) + len(inode.value)
-
- if inode.flags&bucketLeafFlag != 0 {
- return false
- } else if size > b.maxInlineBucketSize() {
- return false
- }
- }
-
- return true
-}
-
-// Returns the maximum total size of a bucket to make it a candidate for inlining.
-func (b *Bucket) maxInlineBucketSize() int {
- return b.tx.db.pageSize / 4
-}
-
-// write allocates and writes a bucket to a byte slice.
-func (b *Bucket) write() []byte {
- // Allocate the appropriate size.
- var n = b.rootNode
- var value = make([]byte, bucketHeaderSize+n.size())
-
- // Write a bucket header.
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
- *bucket = *b.bucket
-
- // Convert byte slice to a fake page and write the root node.
- var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
- n.write(p)
-
- return value
-}
-
-// rebalance attempts to balance all nodes.
-func (b *Bucket) rebalance() {
- for _, n := range b.nodes {
- n.rebalance()
- }
- for _, child := range b.buckets {
- child.rebalance()
- }
-}
-
-// node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
- _assert(b.nodes != nil, "nodes map expected")
-
- // Retrieve node if it's already been created.
- if n := b.nodes[pgid]; n != nil {
- return n
- }
-
- // Otherwise create a node and cache it.
- n := &node{bucket: b, parent: parent}
- if parent == nil {
- b.rootNode = n
- } else {
- parent.children = append(parent.children, n)
- }
-
- // Use the inline page if this is an inline bucket.
- var p = b.page
- if p == nil {
- p = b.tx.page(pgid)
- }
-
- // Read the page into the node and cache it.
- n.read(p)
- b.nodes[pgid] = n
-
- // Update statistics.
- b.tx.stats.NodeCount++
-
- return n
-}
-
-// free recursively frees all pages in the bucket.
-func (b *Bucket) free() {
- if b.root == 0 {
- return
- }
-
- var tx = b.tx
- b.forEachPageNode(func(p *page, n *node, _ int) {
- if p != nil {
- tx.db.freelist.free(tx.meta.txid, p)
- } else {
- n.free()
- }
- })
- b.root = 0
-}
-
-// dereference removes all references to the old mmap.
-func (b *Bucket) dereference() {
- if b.rootNode != nil {
- b.rootNode.root().dereference()
- }
-
- for _, child := range b.buckets {
- child.dereference()
- }
-}
-
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (b *Bucket) pageNode(id pgid) (*page, *node) {
- // Inline buckets have a fake page embedded in their value so treat them
- // differently. We'll return the rootNode (if available) or the fake page.
- if b.root == 0 {
- if id != 0 {
- panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
- }
- if b.rootNode != nil {
- return nil, b.rootNode
- }
- return b.page, nil
- }
-
- // Check the node cache for non-inline buckets.
- if b.nodes != nil {
- if n := b.nodes[id]; n != nil {
- return nil, n
- }
- }
-
- // Finally lookup the page from the transaction if no node is materialized.
- return b.tx.page(id), nil
-}
-
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
- // Page count statistics.
- BranchPageN int // number of logical branch pages
- BranchOverflowN int // number of physical branch overflow pages
- LeafPageN int // number of logical leaf pages
- LeafOverflowN int // number of physical leaf overflow pages
-
- // Tree statistics.
- KeyN int // number of keys/value pairs
- Depth int // number of levels in B+tree
-
- // Page size utilization.
- BranchAlloc int // bytes allocated for physical branch pages
- BranchInuse int // bytes actually used for branch data
- LeafAlloc int // bytes allocated for physical leaf pages
- LeafInuse int // bytes actually used for leaf data
-
- // Bucket statistics
- BucketN int // total number of buckets including the top bucket
- InlineBucketN int // total number on inlined buckets
- InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
- s.BranchPageN += other.BranchPageN
- s.BranchOverflowN += other.BranchOverflowN
- s.LeafPageN += other.LeafPageN
- s.LeafOverflowN += other.LeafOverflowN
- s.KeyN += other.KeyN
- if s.Depth < other.Depth {
- s.Depth = other.Depth
- }
- s.BranchAlloc += other.BranchAlloc
- s.BranchInuse += other.BranchInuse
- s.LeafAlloc += other.LeafAlloc
- s.LeafInuse += other.LeafInuse
-
- s.BucketN += other.BucketN
- s.InlineBucketN += other.InlineBucketN
- s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
- var clone = make([]byte, len(v))
- copy(clone, v)
- return clone
-}
diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go
deleted file mode 100644
index 1be9f35..0000000
--- a/vendor/github.com/coreos/bbolt/cursor.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package bolt
-
-import (
- "bytes"
- "fmt"
- "sort"
-)
-
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
-// Cursors see nested buckets with value == nil.
-// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
-//
-// Keys and values returned from the cursor are only valid for the life of the transaction.
-//
-// Changing data while traversing with a cursor may cause it to be invalidated
-// and return unexpected keys and/or values. You must reposition your cursor
-// after mutating data.
-type Cursor struct {
- bucket *Bucket
- stack []elemRef
-}
-
-// Bucket returns the bucket that this cursor was created from.
-func (c *Cursor) Bucket() *Bucket {
- return c.bucket
-}
-
-// First moves the cursor to the first item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) First() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
- p, n := c.bucket.pageNode(c.bucket.root)
- c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
- c.first()
-
- // If we land on an empty page then move to the next value.
- // https://github.com/boltdb/bolt/issues/450
- if c.stack[len(c.stack)-1].count() == 0 {
- c.next()
- }
-
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-
-}
-
-// Last moves the cursor to the last item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Last() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
- p, n := c.bucket.pageNode(c.bucket.root)
- ref := elemRef{page: p, node: n}
- ref.index = ref.count() - 1
- c.stack = append(c.stack, ref)
- c.last()
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Next moves the cursor to the next item in the bucket and returns its key and value.
-// If the cursor is at the end of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Next() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- k, v, flags := c.next()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Prev moves the cursor to the previous item in the bucket and returns its key and value.
-// If the cursor is at the beginning of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Prev() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
- // Attempt to move back one element until we're successful.
- // Move up the stack as we hit the beginning of each page in our stack.
- for i := len(c.stack) - 1; i >= 0; i-- {
- elem := &c.stack[i]
- if elem.index > 0 {
- elem.index--
- break
- }
- c.stack = c.stack[:i]
- }
-
- // If we've hit the end then return nil.
- if len(c.stack) == 0 {
- return nil, nil
- }
-
- // Move down the stack to find the last element of the last leaf under this branch.
- c.last()
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used. If no keys
-// follow, a nil key is returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
- k, v, flags := c.seek(seek)
-
- // If we ended up after the last element of a page then move to the next one.
- if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
- k, v, flags = c.next()
- }
-
- if k == nil {
- return nil, nil
- } else if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Delete removes the current key/value under the cursor from the bucket.
-// Delete fails if current key/value is a bucket or if the transaction is not writable.
-func (c *Cursor) Delete() error {
- if c.bucket.tx.db == nil {
- return ErrTxClosed
- } else if !c.bucket.Writable() {
- return ErrTxNotWritable
- }
-
- key, _, flags := c.keyValue()
- // Return an error if current value is a bucket.
- if (flags & bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
- c.node().del(key)
-
- return nil
-}
-
-// seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used.
-func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
- // Start from root page/node and traverse to correct page.
- c.stack = c.stack[:0]
- c.search(seek, c.bucket.root)
- ref := &c.stack[len(c.stack)-1]
-
- // If the cursor is pointing to the end of page/node then return nil.
- if ref.index >= ref.count() {
- return nil, nil, 0
- }
-
- // If this is a bucket then return a nil value.
- return c.keyValue()
-}
-
-// first moves the cursor to the first leaf element under the last page in the stack.
-func (c *Cursor) first() {
- for {
- // Exit when we hit a leaf page.
- var ref = &c.stack[len(c.stack)-1]
- if ref.isLeaf() {
- break
- }
-
- // Keep adding pages pointing to the first element to the stack.
- var pgid pgid
- if ref.node != nil {
- pgid = ref.node.inodes[ref.index].pgid
- } else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
- }
- p, n := c.bucket.pageNode(pgid)
- c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
- }
-}
-
-// last moves the cursor to the last leaf element under the last page in the stack.
-func (c *Cursor) last() {
- for {
- // Exit when we hit a leaf page.
- ref := &c.stack[len(c.stack)-1]
- if ref.isLeaf() {
- break
- }
-
- // Keep adding pages pointing to the last element in the stack.
- var pgid pgid
- if ref.node != nil {
- pgid = ref.node.inodes[ref.index].pgid
- } else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
- }
- p, n := c.bucket.pageNode(pgid)
-
- var nextRef = elemRef{page: p, node: n}
- nextRef.index = nextRef.count() - 1
- c.stack = append(c.stack, nextRef)
- }
-}
-
-// next moves to the next leaf element and returns the key and value.
-// If the cursor is at the last leaf element then it stays there and returns nil.
-func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
- for {
- // Attempt to move over one element until we're successful.
- // Move up the stack as we hit the end of each page in our stack.
- var i int
- for i = len(c.stack) - 1; i >= 0; i-- {
- elem := &c.stack[i]
- if elem.index < elem.count()-1 {
- elem.index++
- break
- }
- }
-
- // If we've hit the root page then stop and return. This will leave the
- // cursor on the last element of the last page.
- if i == -1 {
- return nil, nil, 0
- }
-
- // Otherwise start from where we left off in the stack and find the
- // first element of the first leaf page.
- c.stack = c.stack[:i+1]
- c.first()
-
- // If this is an empty page then restart and move back up the stack.
- // https://github.com/boltdb/bolt/issues/450
- if c.stack[len(c.stack)-1].count() == 0 {
- continue
- }
-
- return c.keyValue()
- }
-}
-
-// search recursively performs a binary search against a given page/node until it finds a given key.
-func (c *Cursor) search(key []byte, pgid pgid) {
- p, n := c.bucket.pageNode(pgid)
- if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
- panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
- }
- e := elemRef{page: p, node: n}
- c.stack = append(c.stack, e)
-
- // If we're on a leaf page/node then find the specific node.
- if e.isLeaf() {
- c.nsearch(key)
- return
- }
-
- if n != nil {
- c.searchNode(key, n)
- return
- }
- c.searchPage(key, p)
-}
-
-func (c *Cursor) searchNode(key []byte, n *node) {
- var exact bool
- index := sort.Search(len(n.inodes), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
- ret := bytes.Compare(n.inodes[i].key, key)
- if ret == 0 {
- exact = true
- }
- return ret != -1
- })
- if !exact && index > 0 {
- index--
- }
- c.stack[len(c.stack)-1].index = index
-
- // Recursively search to the next page.
- c.search(key, n.inodes[index].pgid)
-}
-
-func (c *Cursor) searchPage(key []byte, p *page) {
- // Binary search for the correct range.
- inodes := p.branchPageElements()
-
- var exact bool
- index := sort.Search(int(p.count), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
- ret := bytes.Compare(inodes[i].key(), key)
- if ret == 0 {
- exact = true
- }
- return ret != -1
- })
- if !exact && index > 0 {
- index--
- }
- c.stack[len(c.stack)-1].index = index
-
- // Recursively search to the next page.
- c.search(key, inodes[index].pgid)
-}
-
-// nsearch searches the leaf node on the top of the stack for a key.
-func (c *Cursor) nsearch(key []byte) {
- e := &c.stack[len(c.stack)-1]
- p, n := e.page, e.node
-
- // If we have a node then search its inodes.
- if n != nil {
- index := sort.Search(len(n.inodes), func(i int) bool {
- return bytes.Compare(n.inodes[i].key, key) != -1
- })
- e.index = index
- return
- }
-
- // If we have a page then search its leaf elements.
- inodes := p.leafPageElements()
- index := sort.Search(int(p.count), func(i int) bool {
- return bytes.Compare(inodes[i].key(), key) != -1
- })
- e.index = index
-}
-
-// keyValue returns the key and value of the current leaf element.
-func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
- ref := &c.stack[len(c.stack)-1]
- if ref.count() == 0 || ref.index >= ref.count() {
- return nil, nil, 0
- }
-
- // Retrieve value from node.
- if ref.node != nil {
- inode := &ref.node.inodes[ref.index]
- return inode.key, inode.value, inode.flags
- }
-
- // Or retrieve value from page.
- elem := ref.page.leafPageElement(uint16(ref.index))
- return elem.key(), elem.value(), elem.flags
-}
-
-// node returns the node that the cursor is currently positioned on.
-func (c *Cursor) node() *node {
- _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
-
- // If the top of the stack is a leaf node then just return it.
- if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
- return ref.node
- }
-
- // Start from root and traverse down the hierarchy.
- var n = c.stack[0].node
- if n == nil {
- n = c.bucket.node(c.stack[0].page.id, nil)
- }
- for _, ref := range c.stack[:len(c.stack)-1] {
- _assert(!n.isLeaf, "expected branch node")
- n = n.childAt(int(ref.index))
- }
- _assert(n.isLeaf, "expected leaf node")
- return n
-}
-
-// elemRef represents a reference to an element on a given page/node.
-type elemRef struct {
- page *page
- node *node
- index int
-}
-
-// isLeaf returns whether the ref is pointing at a leaf page/node.
-func (r *elemRef) isLeaf() bool {
- if r.node != nil {
- return r.node.isLeaf
- }
- return (r.page.flags & leafPageFlag) != 0
-}
-
-// count returns the number of inodes or page elements.
-func (r *elemRef) count() int {
- if r.node != nil {
- return len(r.node.inodes)
- }
- return int(r.page.count)
-}
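
For reference, the Cursor removed above is normally driven inside a read transaction via Seek/Next/Prev. A minimal sketch of that pattern, assuming the github.com/coreos/bbolt import path from this vendor tree plus a hypothetical "devices" bucket and "olt-" key prefix:

```
package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "github.com/coreos/bbolt" // import path assumed from this vendor tree
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("devices")) // hypothetical bucket name
		if b == nil {
			return nil // nothing stored yet
		}
		c := b.Cursor()
		prefix := []byte("olt-") // hypothetical key prefix
		// Seek positions the cursor at the first key >= prefix; Next walks forward.
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```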
diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/github.com/coreos/bbolt/db.go
deleted file mode 100644
index 1223493..0000000
--- a/vendor/github.com/coreos/bbolt/db.go
+++ /dev/null
@@ -1,1036 +0,0 @@
-package bolt
-
-import (
- "errors"
- "fmt"
- "hash/fnv"
- "log"
- "os"
- "runtime"
- "runtime/debug"
- "strings"
- "sync"
- "time"
- "unsafe"
-)
-
-// The largest step that can be taken when remapping the mmap.
-const maxMmapStep = 1 << 30 // 1GB
-
-// The data file format version.
-const version = 2
-
-// Represents a marker value to indicate that a file is a Bolt DB.
-const magic uint32 = 0xED0CDAED
-
-// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
-// syncing changes to a file. This is required as some operating systems,
-// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronized using the msync(2) syscall.
-const IgnoreNoSync = runtime.GOOS == "openbsd"
-
-// Default values if not set in a DB instance.
-const (
- DefaultMaxBatchSize int = 1000
- DefaultMaxBatchDelay = 10 * time.Millisecond
- DefaultAllocSize = 16 * 1024 * 1024
-)
-
-// default page size for db is set to the OS page size.
-var defaultPageSize = os.Getpagesize()
-
-// DB represents a collection of buckets persisted to a file on disk.
-// All data access is performed through transactions which can be obtained through the DB.
-// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
-type DB struct {
- // When enabled, the database will perform a Check() after every commit.
- // A panic is issued if the database is in an inconsistent state. This
- // flag has a large performance impact so it should only be used for
- // debugging purposes.
- StrictMode bool
-
- // Setting the NoSync flag will cause the database to skip fsync()
- // calls after each commit. This can be useful when bulk loading data
- // into a database and you can restart the bulk load in the event of
- // a system failure or database corruption. Do not set this flag for
- // normal use.
- //
- // If the package global IgnoreNoSync constant is true, this value is
- // ignored. See the comment on that constant for more details.
- //
- // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
- NoSync bool
-
- // When true, skips the truncate call when growing the database.
- // Setting this to true is only safe on non-ext3/ext4 systems.
- // Skipping truncation avoids preallocation of hard drive space and
- // bypasses a truncate() and fsync() syscall on remapping.
- //
- // https://github.com/boltdb/bolt/issues/284
- NoGrowSync bool
-
- // If you want to read the entire database fast, you can set MmapFlag to
- // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
- MmapFlags int
-
- // MaxBatchSize is the maximum size of a batch. Default value is
- // copied from DefaultMaxBatchSize in Open.
- //
- // If <=0, disables batching.
- //
- // Do not change concurrently with calls to Batch.
- MaxBatchSize int
-
- // MaxBatchDelay is the maximum delay before a batch starts.
- // Default value is copied from DefaultMaxBatchDelay in Open.
- //
- // If <=0, effectively disables batching.
- //
- // Do not change concurrently with calls to Batch.
- MaxBatchDelay time.Duration
-
- // AllocSize is the amount of space allocated when the database
- // needs to create new pages. This is done to amortize the cost
- // of truncate() and fsync() when growing the data file.
- AllocSize int
-
- path string
- file *os.File
- lockfile *os.File // windows only
- dataref []byte // mmap'ed readonly, write throws SEGV
- data *[maxMapSize]byte
- datasz int
- filesz int // current on disk file size
- meta0 *meta
- meta1 *meta
- pageSize int
- opened bool
- rwtx *Tx
- txs []*Tx
- freelist *freelist
- stats Stats
-
- pagePool sync.Pool
-
- batchMu sync.Mutex
- batch *batch
-
- rwlock sync.Mutex // Allows only one writer at a time.
- metalock sync.Mutex // Protects meta page access.
- mmaplock sync.RWMutex // Protects mmap access during remapping.
- statlock sync.RWMutex // Protects stats access.
-
- ops struct {
- writeAt func(b []byte, off int64) (n int, err error)
- }
-
- // Read only mode.
- // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
- readOnly bool
-}
-
-// Path returns the path to the currently open database file.
-func (db *DB) Path() string {
- return db.path
-}
-
-// GoString returns the Go string representation of the database.
-func (db *DB) GoString() string {
- return fmt.Sprintf("bolt.DB{path:%q}", db.path)
-}
-
-// String returns the string representation of the database.
-func (db *DB) String() string {
- return fmt.Sprintf("DB<%q>", db.path)
-}
-
-// Open creates and opens a database at the given path.
-// If the file does not exist then it will be created automatically.
-// Passing in nil options will cause Bolt to open the database with the default options.
-func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
- var db = &DB{opened: true}
-
- // Set default options if no options are provided.
- if options == nil {
- options = DefaultOptions
- }
- db.NoGrowSync = options.NoGrowSync
- db.MmapFlags = options.MmapFlags
-
- // Set default values for later DB operations.
- db.MaxBatchSize = DefaultMaxBatchSize
- db.MaxBatchDelay = DefaultMaxBatchDelay
- db.AllocSize = DefaultAllocSize
-
- flag := os.O_RDWR
- if options.ReadOnly {
- flag = os.O_RDONLY
- db.readOnly = true
- }
-
- // Open data file and separate sync handler for metadata writes.
- db.path = path
- var err error
- if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
- _ = db.close()
- return nil, err
- }
-
- // Lock file so that other processes using Bolt in read-write mode cannot
- // use the database at the same time. This would cause corruption since
- // the two processes would write meta pages and free pages separately.
- // The database file is locked exclusively (only one process can grab the lock)
- // if !options.ReadOnly.
- // The database file is locked using the shared lock (more than one process may
- // hold a lock at the same time) otherwise (options.ReadOnly is set).
- if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
- _ = db.close()
- return nil, err
- }
-
- // Default values for test hooks
- db.ops.writeAt = db.file.WriteAt
-
- // Initialize the database if it doesn't exist.
- if info, err := db.file.Stat(); err != nil {
- return nil, err
- } else if info.Size() == 0 {
- // Initialize new files with meta pages.
- if err := db.init(); err != nil {
- return nil, err
- }
- } else {
- // Read the first meta page to determine the page size.
- var buf [0x1000]byte
- if _, err := db.file.ReadAt(buf[:], 0); err == nil {
- m := db.pageInBuffer(buf[:], 0).meta()
- if err := m.validate(); err != nil {
- // If we can't read the page size, we can assume it's the same
- // as the OS -- since that's how the page size was chosen in the
- // first place.
- //
- // If the first page is invalid and this OS uses a different
- // page size than what the database was created with then we
- // are out of luck and cannot access the database.
- db.pageSize = os.Getpagesize()
- } else {
- db.pageSize = int(m.pageSize)
- }
- }
- }
-
- // Initialize page pool.
- db.pagePool = sync.Pool{
- New: func() interface{} {
- return make([]byte, db.pageSize)
- },
- }
-
- // Memory map the data file.
- if err := db.mmap(options.InitialMmapSize); err != nil {
- _ = db.close()
- return nil, err
- }
-
- // Read in the freelist.
- db.freelist = newFreelist()
- db.freelist.read(db.page(db.meta().freelist))
-
- // Mark the database as opened and return.
- return db, nil
-}
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
- info, err := db.file.Stat()
- if err != nil {
- return fmt.Errorf("mmap stat error: %s", err)
- } else if int(info.Size()) < db.pageSize*2 {
- return fmt.Errorf("file size too small")
- }
-
- // Ensure the size is at least the minimum size.
- var size = int(info.Size())
- if size < minsz {
- size = minsz
- }
- size, err = db.mmapSize(size)
- if err != nil {
- return err
- }
-
- // Dereference all mmap references before unmapping.
- if db.rwtx != nil {
- db.rwtx.root.dereference()
- }
-
- // Unmap existing data before continuing.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Memory-map the data file as a byte slice.
- if err := mmap(db, size); err != nil {
- return err
- }
-
- // Save references to the meta pages.
- db.meta0 = db.page(0).meta()
- db.meta1 = db.page(1).meta()
-
- // Validate the meta pages. We only return an error if both meta pages fail
- // validation, since meta0 failing validation means that it wasn't saved
- // properly -- but we can recover using meta1. And vice-versa.
- err0 := db.meta0.validate()
- err1 := db.meta1.validate()
- if err0 != nil && err1 != nil {
- return err0
- }
-
- return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
- if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
- }
- return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
- // Double the size from 32KB until 1GB.
- for i := uint(15); i <= 30; i++ {
- if size <= 1<<i {
- return 1 << i, nil
- }
- }
-
- // Verify the requested size is not above the maximum allowed.
- if size > maxMapSize {
- return 0, fmt.Errorf("mmap too large")
- }
-
- // If larger than 1GB then grow by 1GB at a time.
- sz := int64(size)
- if remainder := sz % int64(maxMmapStep); remainder > 0 {
- sz += int64(maxMmapStep) - remainder
- }
-
- // Ensure that the mmap size is a multiple of the page size.
-	// This should always be true since we're incrementing in 1GB steps.
- pageSize := int64(db.pageSize)
- if (sz % pageSize) != 0 {
- sz = ((sz / pageSize) + 1) * pageSize
- }
-
- // If we've exceeded the max size then only grow up to the max size.
- if sz > maxMapSize {
- sz = maxMapSize
- }
-
- return int(sz), nil
-}
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
- // Set the page size to the OS page size.
- db.pageSize = os.Getpagesize()
-
- // Create two meta pages on a buffer.
- buf := make([]byte, db.pageSize*4)
- for i := 0; i < 2; i++ {
- p := db.pageInBuffer(buf[:], pgid(i))
- p.id = pgid(i)
- p.flags = metaPageFlag
-
- // Initialize the meta page.
- m := p.meta()
- m.magic = magic
- m.version = version
- m.pageSize = uint32(db.pageSize)
- m.freelist = 2
- m.root = bucket{root: 3}
- m.pgid = 4
- m.txid = txid(i)
- m.checksum = m.sum64()
- }
-
- // Write an empty freelist at page 3.
- p := db.pageInBuffer(buf[:], pgid(2))
- p.id = pgid(2)
- p.flags = freelistPageFlag
- p.count = 0
-
- // Write an empty leaf page at page 4.
- p = db.pageInBuffer(buf[:], pgid(3))
- p.id = pgid(3)
- p.flags = leafPageFlag
- p.count = 0
-
- // Write the buffer to our data file.
- if _, err := db.ops.writeAt(buf, 0); err != nil {
- return err
- }
- if err := fdatasync(db); err != nil {
- return err
- }
-
- return nil
-}
-
-// Close releases all database resources.
-// All transactions must be closed before closing the database.
-func (db *DB) Close() error {
- db.rwlock.Lock()
- defer db.rwlock.Unlock()
-
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
- db.mmaplock.RLock()
- defer db.mmaplock.RUnlock()
-
- return db.close()
-}
-
-func (db *DB) close() error {
- if !db.opened {
- return nil
- }
-
- db.opened = false
-
- db.freelist = nil
-
- // Clear ops.
- db.ops.writeAt = nil
-
- // Close the mmap.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Close file handles.
- if db.file != nil {
- // No need to unlock read-only file.
- if !db.readOnly {
- // Unlock the file.
- if err := funlock(db); err != nil {
- log.Printf("bolt.Close(): funlock error: %s", err)
- }
- }
-
- // Close the file descriptor.
- if err := db.file.Close(); err != nil {
- return fmt.Errorf("db file close: %s", err)
- }
- db.file = nil
- }
-
- db.path = ""
- return nil
-}
-
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-//
-// If a long-running read transaction (for example, a snapshot transaction) is
-// needed, you might want to set DB.InitialMmapSize to a large enough value
-// to avoid potential blocking of the write transaction.
-//
-// IMPORTANT: You must close read-only transactions after you are finished or
-// else the database will not reclaim old pages.
-func (db *DB) Begin(writable bool) (*Tx, error) {
- if writable {
- return db.beginRWTx()
- }
- return db.beginTx()
-}
-
-func (db *DB) beginTx() (*Tx, error) {
- // Lock the meta pages while we initialize the transaction. We obtain
- // the meta lock before the mmap lock because that's the order that the
- // write transaction will obtain them.
- db.metalock.Lock()
-
- // Obtain a read-only lock on the mmap. When the mmap is remapped it will
- // obtain a write lock so all transactions must finish before it can be
- // remapped.
- db.mmaplock.RLock()
-
- // Exit if the database is not open yet.
- if !db.opened {
- db.mmaplock.RUnlock()
- db.metalock.Unlock()
- return nil, ErrDatabaseNotOpen
- }
-
- // Create a transaction associated with the database.
- t := &Tx{}
- t.init(db)
-
- // Keep track of transaction until it closes.
- db.txs = append(db.txs, t)
- n := len(db.txs)
-
- // Unlock the meta pages.
- db.metalock.Unlock()
-
- // Update the transaction stats.
- db.statlock.Lock()
- db.stats.TxN++
- db.stats.OpenTxN = n
- db.statlock.Unlock()
-
- return t, nil
-}
-
-func (db *DB) beginRWTx() (*Tx, error) {
- // If the database was opened with Options.ReadOnly, return an error.
- if db.readOnly {
- return nil, ErrDatabaseReadOnly
- }
-
- // Obtain writer lock. This is released by the transaction when it closes.
- // This enforces only one writer transaction at a time.
- db.rwlock.Lock()
-
- // Once we have the writer lock then we can lock the meta pages so that
- // we can set up the transaction.
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
- // Exit if the database is not open yet.
- if !db.opened {
- db.rwlock.Unlock()
- return nil, ErrDatabaseNotOpen
- }
-
- // Create a transaction associated with the database.
- t := &Tx{writable: true}
- t.init(db)
- db.rwtx = t
-
- // Free any pages associated with closed read-only transactions.
- var minid txid = 0xFFFFFFFFFFFFFFFF
- for _, t := range db.txs {
- if t.meta.txid < minid {
- minid = t.meta.txid
- }
- }
- if minid > 0 {
- db.freelist.release(minid - 1)
- }
-
- return t, nil
-}
-
-// removeTx removes a transaction from the database.
-func (db *DB) removeTx(tx *Tx) {
- // Release the read lock on the mmap.
- db.mmaplock.RUnlock()
-
- // Use the meta lock to restrict access to the DB object.
- db.metalock.Lock()
-
- // Remove the transaction.
- for i, t := range db.txs {
- if t == tx {
- db.txs = append(db.txs[:i], db.txs[i+1:]...)
- break
- }
- }
- n := len(db.txs)
-
- // Unlock the meta pages.
- db.metalock.Unlock()
-
- // Merge statistics.
- db.statlock.Lock()
- db.stats.OpenTxN = n
- db.stats.TxStats.add(&tx.stats)
- db.statlock.Unlock()
-}
-
-// Update executes a function within the context of a read-write managed transaction.
-// If no error is returned from the function then the transaction is committed.
-// If an error is returned then the entire transaction is rolled back.
-// Any error that is returned from the function or returned from the commit is
-// returned from the Update() method.
-//
-// Attempting to manually commit or rollback within the function will cause a panic.
-func (db *DB) Update(fn func(*Tx) error) error {
- t, err := db.Begin(true)
- if err != nil {
- return err
- }
-
- // Make sure the transaction rolls back in the event of a panic.
- defer func() {
- if t.db != nil {
- t.rollback()
- }
- }()
-
- // Mark as a managed tx so that the inner function cannot manually commit.
- t.managed = true
-
- // If an error is returned from the function then rollback and return error.
- err = fn(t)
- t.managed = false
- if err != nil {
- _ = t.Rollback()
- return err
- }
-
- return t.Commit()
-}
-
-// View executes a function within the context of a managed read-only transaction.
-// Any error that is returned from the function is returned from the View() method.
-//
-// Attempting to manually rollback within the function will cause a panic.
-func (db *DB) View(fn func(*Tx) error) error {
- t, err := db.Begin(false)
- if err != nil {
- return err
- }
-
- // Make sure the transaction rolls back in the event of a panic.
- defer func() {
- if t.db != nil {
- t.rollback()
- }
- }()
-
- // Mark as a managed tx so that the inner function cannot manually rollback.
- t.managed = true
-
- // If an error is returned from the function then pass it through.
- err = fn(t)
- t.managed = false
- if err != nil {
- _ = t.Rollback()
- return err
- }
-
- if err := t.Rollback(); err != nil {
- return err
- }
-
- return nil
-}
-
-// Batch calls fn as part of a batch. It behaves similar to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in the
-// caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
- errCh := make(chan error, 1)
-
- db.batchMu.Lock()
- if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
- // There is no existing batch, or the existing batch is full; start a new one.
- db.batch = &batch{
- db: db,
- }
- db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
- }
- db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
- if len(db.batch.calls) >= db.MaxBatchSize {
- // wake up batch, it's ready to run
- go db.batch.trigger()
- }
- db.batchMu.Unlock()
-
- err := <-errCh
- if err == trySolo {
- err = db.Update(fn)
- }
- return err
-}
-
-type call struct {
- fn func(*Tx) error
- err chan<- error
-}
-
-type batch struct {
- db *DB
- timer *time.Timer
- start sync.Once
- calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
- b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
- b.db.batchMu.Lock()
- b.timer.Stop()
- // Make sure no new work is added to this batch, but don't break
- // other batches.
- if b.db.batch == b {
- b.db.batch = nil
- }
- b.db.batchMu.Unlock()
-
-retry:
- for len(b.calls) > 0 {
- var failIdx = -1
- err := b.db.Update(func(tx *Tx) error {
- for i, c := range b.calls {
- if err := safelyCall(c.fn, tx); err != nil {
- failIdx = i
- return err
- }
- }
- return nil
- })
-
- if failIdx >= 0 {
- // take the failing transaction out of the batch. it's
- // safe to shorten b.calls here because db.batch no longer
- // points to us, and we hold the mutex anyway.
- c := b.calls[failIdx]
- b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
-			// tell the submitter to re-run it solo, and continue with the rest of the batch
- c.err <- trySolo
- continue retry
- }
-
- // pass success, or bolt internal errors, to all callers
- for _, c := range b.calls {
- if c.err != nil {
- c.err <- err
- }
- }
- break retry
- }
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
-var trySolo = errors.New("batch function returned an error and should be re-run solo")
-
-type panicked struct {
- reason interface{}
-}
-
-func (p panicked) Error() string {
- if err, ok := p.reason.(error); ok {
- return err.Error()
- }
- return fmt.Sprintf("panic: %v", p.reason)
-}
-
-func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
- defer func() {
- if p := recover(); p != nil {
- err = panicked{p}
- }
- }()
- return fn(tx)
-}
-
-// Sync executes fdatasync() against the database file handle.
-//
-// This is not necessary under normal operation; however, if you use NoSync
-// then it allows you to force the database file to sync against the disk.
-func (db *DB) Sync() error { return fdatasync(db) }
-
-// Stats retrieves ongoing performance stats for the database.
-// This is only updated when a transaction closes.
-func (db *DB) Stats() Stats {
- db.statlock.RLock()
- defer db.statlock.RUnlock()
- return db.stats
-}
-
-// This is for internal access to the raw data bytes from the C cursor; use
-// it carefully, or not at all.
-func (db *DB) Info() *Info {
- return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
-}
-
-// page retrieves a page reference from the mmap based on the current page size.
-func (db *DB) page(id pgid) *page {
- pos := id * pgid(db.pageSize)
- return (*page)(unsafe.Pointer(&db.data[pos]))
-}
-
-// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
-func (db *DB) pageInBuffer(b []byte, id pgid) *page {
- return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
-}
-
-// meta retrieves the current meta page reference.
-func (db *DB) meta() *meta {
- // We have to return the meta with the highest txid which doesn't fail
- // validation. Otherwise, we can cause errors when in fact the database is
- // in a consistent state. metaA is the one with the higher txid.
- metaA := db.meta0
- metaB := db.meta1
- if db.meta1.txid > db.meta0.txid {
- metaA = db.meta1
- metaB = db.meta0
- }
-
-	// Use the higher meta page if valid. Otherwise fall back to the previous one, if valid.
- if err := metaA.validate(); err == nil {
- return metaA
- } else if err := metaB.validate(); err == nil {
- return metaB
- }
-
- // This should never be reached, because both meta1 and meta0 were validated
- // on mmap() and we do fsync() on every write.
- panic("bolt.DB.meta(): invalid meta pages")
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (db *DB) allocate(count int) (*page, error) {
- // Allocate a temporary buffer for the page.
- var buf []byte
- if count == 1 {
- buf = db.pagePool.Get().([]byte)
- } else {
- buf = make([]byte, count*db.pageSize)
- }
- p := (*page)(unsafe.Pointer(&buf[0]))
- p.overflow = uint32(count - 1)
-
- // Use pages from the freelist if they are available.
- if p.id = db.freelist.allocate(count); p.id != 0 {
- return p, nil
- }
-
- // Resize mmap() if we're at the end.
- p.id = db.rwtx.meta.pgid
- var minsz = int((p.id+pgid(count))+1) * db.pageSize
- if minsz >= db.datasz {
- if err := db.mmap(minsz); err != nil {
- return nil, fmt.Errorf("mmap allocate error: %s", err)
- }
- }
-
- // Move the page id high water mark.
- db.rwtx.meta.pgid += pgid(count)
-
- return p, nil
-}
-
-// grow grows the size of the database to the given sz.
-func (db *DB) grow(sz int) error {
-	// Ignore if the new size is less than the available file size.
- if sz <= db.filesz {
- return nil
- }
-
- // If the data is smaller than the alloc size then only allocate what's needed.
- // Once it goes over the allocation size then allocate in chunks.
- if db.datasz < db.AllocSize {
- sz = db.datasz
- } else {
- sz += db.AllocSize
- }
-
- // Truncate and fsync to ensure file size metadata is flushed.
- // https://github.com/boltdb/bolt/issues/284
- if !db.NoGrowSync && !db.readOnly {
- if runtime.GOOS != "windows" {
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("file resize error: %s", err)
- }
- }
- if err := db.file.Sync(); err != nil {
- return fmt.Errorf("file sync error: %s", err)
- }
- }
-
- db.filesz = sz
- return nil
-}
-
-func (db *DB) IsReadOnly() bool {
- return db.readOnly
-}
-
-// Options represents the options that can be set when opening a database.
-type Options struct {
- // Timeout is the amount of time to wait to obtain a file lock.
- // When set to zero it will wait indefinitely. This option is only
- // available on Darwin and Linux.
- Timeout time.Duration
-
- // Sets the DB.NoGrowSync flag before memory mapping the file.
- NoGrowSync bool
-
- // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
- // grab a shared lock (UNIX).
- ReadOnly bool
-
- // Sets the DB.MmapFlags flag before memory mapping the file.
- MmapFlags int
-
- // InitialMmapSize is the initial mmap size of the database
-	// in bytes. Read transactions won't block the write transaction
-	// if InitialMmapSize is large enough to hold the database mmap
-	// size. (See DB.Begin for more information.)
-	//
-	// If <=0, the initial map size is 0.
-	// If InitialMmapSize is smaller than the previous database size,
-	// it has no effect.
- InitialMmapSize int
-}
-
-// DefaultOptions represents the options used if nil options are passed into Open().
-// No timeout is used which will cause Bolt to wait indefinitely for a lock.
-var DefaultOptions = &Options{
- Timeout: 0,
- NoGrowSync: false,
-}
-
-// Stats represents statistics about the database.
-type Stats struct {
- // Freelist stats
- FreePageN int // total number of free pages on the freelist
- PendingPageN int // total number of pending pages on the freelist
- FreeAlloc int // total bytes allocated in free pages
- FreelistInuse int // total bytes used by the freelist
-
- // Transaction stats
- TxN int // total number of started read transactions
- OpenTxN int // number of currently open read transactions
-
- TxStats TxStats // global, ongoing stats.
-}
-
-// Sub calculates and returns the difference between two sets of database stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *Stats) Sub(other *Stats) Stats {
- if other == nil {
- return *s
- }
- var diff Stats
- diff.FreePageN = s.FreePageN
- diff.PendingPageN = s.PendingPageN
- diff.FreeAlloc = s.FreeAlloc
- diff.FreelistInuse = s.FreelistInuse
- diff.TxN = other.TxN - s.TxN
- diff.TxStats = s.TxStats.Sub(&other.TxStats)
- return diff
-}
-
-func (s *Stats) add(other *Stats) {
- s.TxStats.add(&other.TxStats)
-}
-
-type Info struct {
- Data uintptr
- PageSize int
-}
-
-type meta struct {
- magic uint32
- version uint32
- pageSize uint32
- flags uint32
- root bucket
- freelist pgid
- pgid pgid
- txid txid
- checksum uint64
-}
-
-// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
-func (m *meta) validate() error {
- if m.magic != magic {
- return ErrInvalid
- } else if m.version != version {
- return ErrVersionMismatch
- } else if m.checksum != 0 && m.checksum != m.sum64() {
- return ErrChecksum
- }
- return nil
-}
-
-// copy copies one meta object to another.
-func (m *meta) copy(dest *meta) {
- *dest = *m
-}
-
-// write writes the meta onto a page.
-func (m *meta) write(p *page) {
- if m.root.root >= m.pgid {
- panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
- } else if m.freelist >= m.pgid {
- panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
- }
-
- // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
- p.id = pgid(m.txid % 2)
- p.flags |= metaPageFlag
-
- // Calculate the checksum.
- m.checksum = m.sum64()
-
- m.copy(p.meta())
-}
-
-// generates the checksum for the meta.
-func (m *meta) sum64() uint64 {
- var h = fnv.New64a()
- _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
- return h.Sum64()
-}
-
-// _assert will panic with a given formatted message if the given condition is false.
-func _assert(condition bool, msg string, v ...interface{}) {
- if !condition {
- panic(fmt.Sprintf("assertion failed: "+msg, v...))
- }
-}
-
-func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
-func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
-
-func printstack() {
- stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
- fmt.Fprintln(os.Stderr, stack)
-}
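
The doc comments on Batch removed above note that the supplied function may run more than once, so its side effects have to be idempotent and should only be relied on after Batch returns nil. A minimal sketch of that constraint, assuming the same import path and a hypothetical "events" bucket:

```
package main

import (
	"encoding/binary"
	"log"

	bolt "github.com/coreos/bbolt" // import path assumed from this vendor tree
)

// recordEvent writes the same key/value on every retry, so re-running the
// closure inside a combined batch transaction is harmless.
func recordEvent(db *bolt.DB, id uint64) error {
	key := make([]byte, 8)
	binary.BigEndian.PutUint64(key, id)

	return db.Batch(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events")) // hypothetical bucket name
		if err != nil {
			return err
		}
		return b.Put(key, []byte("seen"))
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := recordEvent(db, 42); err != nil {
		log.Fatal(err)
	}
}
```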
diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go
deleted file mode 100644
index cc93784..0000000
--- a/vendor/github.com/coreos/bbolt/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Package bolt implements a low-level key/value store in pure Go. It supports
-fully serializable transactions, ACID semantics, and lock-free MVCC with
-multiple readers and a single writer. Bolt can be used for projects that
-want a simple data store without the need to add large dependencies such as
-Postgres or MySQL.
-
-Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
-optimized for fast read access and does not require recovery in the event of a
-system crash. Transactions which have not finished committing will simply be
-rolled back in the event of a crash.
-
-The design of Bolt is based on Howard Chu's LMDB database project.
-
-Bolt currently works on Windows, Mac OS X, and Linux.
-
-
-Basics
-
-There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
-a collection of buckets and is represented by a single file on disk. A bucket is
-a collection of unique keys that are associated with values.
-
-Transactions provide either read-only or read-write access to the database.
-Read-only transactions can retrieve key/value pairs and can use Cursors to
-iterate over the dataset sequentially. Read-write transactions can create and
-delete buckets and can insert and remove keys. Only one read-write transaction
-is allowed at a time.
-
-
-Caveats
-
-The database uses a read-only, memory-mapped data file to ensure that
-applications cannot corrupt the database, however, this means that keys and
-values returned from Bolt cannot be changed. Writing to a read-only byte slice
-will cause Go to panic.
-
-Keys and values retrieved from the database are only valid for the life of
-the transaction. When used outside the transaction, these byte slices can
-point to different data or can point to invalid memory which will cause a panic.
-
-
-*/
-package bolt
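
The Caveats section removed above points out that keys and values are only valid for the life of the transaction. A minimal sketch of copying a value out before View returns, assuming a hypothetical "config" bucket and "version" key:

```
package example

import bolt "github.com/coreos/bbolt" // import path assumed from this vendor tree

// readVersion copies the stored value out of the read transaction, because
// the byte slice returned by Get points into the mmap and is only valid
// until View returns.
func readVersion(db *bolt.DB) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("config")) // hypothetical bucket name
		if b == nil {
			return nil
		}
		if v := b.Get([]byte("version")); v != nil { // hypothetical key
			out = append([]byte(nil), v...) // copy before the tx closes
		}
		return nil
	})
	return out, err
}
```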
diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go
deleted file mode 100644
index a3620a3..0000000
--- a/vendor/github.com/coreos/bbolt/errors.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package bolt
-
-import "errors"
-
-// These errors can be returned when opening or calling methods on a DB.
-var (
- // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
- // is opened or after it is closed.
- ErrDatabaseNotOpen = errors.New("database not open")
-
- // ErrDatabaseOpen is returned when opening a database that is
- // already open.
- ErrDatabaseOpen = errors.New("database already open")
-
- // ErrInvalid is returned when both meta pages on a database are invalid.
- // This typically occurs when a file is not a bolt database.
- ErrInvalid = errors.New("invalid database")
-
- // ErrVersionMismatch is returned when the data file was created with a
- // different version of Bolt.
- ErrVersionMismatch = errors.New("version mismatch")
-
- // ErrChecksum is returned when either meta page checksum does not match.
- ErrChecksum = errors.New("checksum error")
-
- // ErrTimeout is returned when a database cannot obtain an exclusive lock
- // on the data file after the timeout passed to Open().
- ErrTimeout = errors.New("timeout")
-)
-
-// These errors can occur when beginning or committing a Tx.
-var (
- // ErrTxNotWritable is returned when performing a write operation on a
- // read-only transaction.
- ErrTxNotWritable = errors.New("tx not writable")
-
- // ErrTxClosed is returned when committing or rolling back a transaction
- // that has already been committed or rolled back.
- ErrTxClosed = errors.New("tx closed")
-
- // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
- // read-only database.
- ErrDatabaseReadOnly = errors.New("database is in read-only mode")
-)
-
-// These errors can occur when putting or deleting a value or a bucket.
-var (
- // ErrBucketNotFound is returned when trying to access a bucket that has
- // not been created yet.
- ErrBucketNotFound = errors.New("bucket not found")
-
- // ErrBucketExists is returned when creating a bucket that already exists.
- ErrBucketExists = errors.New("bucket already exists")
-
- // ErrBucketNameRequired is returned when creating a bucket with a blank name.
- ErrBucketNameRequired = errors.New("bucket name required")
-
- // ErrKeyRequired is returned when inserting a zero-length key.
- ErrKeyRequired = errors.New("key required")
-
- // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
- ErrKeyTooLarge = errors.New("key too large")
-
- // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
- ErrValueTooLarge = errors.New("value too large")
-
-	// ErrIncompatibleValue is returned when trying to create or delete a bucket
- // on an existing non-bucket key or when trying to create or delete a
- // non-bucket key on an existing bucket key.
- ErrIncompatibleValue = errors.New("incompatible value")
-)
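
The sentinel errors above can be compared directly by callers; a minimal sketch that treats ErrBucketExists as success when (re)creating a bucket, assuming a caller-supplied bucket name:

```
package example

import bolt "github.com/coreos/bbolt" // import path assumed from this vendor tree

// ensureBucket treats ErrBucketExists as success so repeated start-ups
// are idempotent; any other error is propagated to the caller.
func ensureBucket(db *bolt.DB, name []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket(name)
		if err == bolt.ErrBucketExists {
			return nil // already created on a previous run
		}
		return err
	})
}
```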
diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go
deleted file mode 100644
index 1b7ba91..0000000
--- a/vendor/github.com/coreos/bbolt/freelist.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "sort"
- "unsafe"
-)
-
-// freelist represents a list of all pages that are available for allocation.
-// It also tracks pages that have been freed but are still in use by open transactions.
-type freelist struct {
- ids []pgid // all free and available free page ids.
- pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
- cache map[pgid]bool // fast lookup of all free and pending page ids.
-}
-
-// newFreelist returns an empty, initialized freelist.
-func newFreelist() *freelist {
- return &freelist{
- pending: make(map[txid][]pgid),
- cache: make(map[pgid]bool),
- }
-}
-
-// size returns the size of the page after serialization.
-func (f *freelist) size() int {
- return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
-}
-
-// count returns count of pages on the freelist
-func (f *freelist) count() int {
- return f.free_count() + f.pending_count()
-}
-
-// free_count returns count of free pages
-func (f *freelist) free_count() int {
- return len(f.ids)
-}
-
-// pending_count returns count of pending pages
-func (f *freelist) pending_count() int {
- var count int
- for _, list := range f.pending {
- count += len(list)
- }
- return count
-}
-
-// all returns a list of all free ids and all pending ids in one sorted list.
-func (f *freelist) all() []pgid {
- m := make(pgids, 0)
-
- for _, list := range f.pending {
- m = append(m, list...)
- }
-
- sort.Sort(m)
- return pgids(f.ids).merge(m)
-}
-
-// allocate returns the starting page id of a contiguous list of pages of a given size.
-// If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) allocate(n int) pgid {
- if len(f.ids) == 0 {
- return 0
- }
-
- var initial, previd pgid
- for i, id := range f.ids {
- if id <= 1 {
- panic(fmt.Sprintf("invalid page allocation: %d", id))
- }
-
- // Reset initial page if this is not contiguous.
- if previd == 0 || id-previd != 1 {
- initial = id
- }
-
- // If we found a contiguous block then remove it and return it.
- if (id-initial)+1 == pgid(n) {
- // If we're allocating off the beginning then take the fast path
- // and just adjust the existing slice. This will use extra memory
- // temporarily but the append() in free() will realloc the slice
- // as is necessary.
- if (i + 1) == n {
- f.ids = f.ids[i+1:]
- } else {
- copy(f.ids[i-n+1:], f.ids[i+1:])
- f.ids = f.ids[:len(f.ids)-n]
- }
-
- // Remove from the free cache.
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, initial+i)
- }
-
- return initial
- }
-
- previd = id
- }
- return 0
-}
-
-// free releases a page and its overflow for a given transaction id.
-// If the page is already free then a panic will occur.
-func (f *freelist) free(txid txid, p *page) {
- if p.id <= 1 {
- panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
- }
-
- // Free page and all its overflow pages.
- var ids = f.pending[txid]
- for id := p.id; id <= p.id+pgid(p.overflow); id++ {
- // Verify that page is not already free.
- if f.cache[id] {
- panic(fmt.Sprintf("page %d already freed", id))
- }
-
- // Add to the freelist and cache.
- ids = append(ids, id)
- f.cache[id] = true
- }
- f.pending[txid] = ids
-}
-
-// release moves all page ids for a transaction id (or older) to the freelist.
-func (f *freelist) release(txid txid) {
- m := make(pgids, 0)
- for tid, ids := range f.pending {
- if tid <= txid {
- // Move transaction's pending pages to the available freelist.
- // Don't remove from the cache since the page is still free.
- m = append(m, ids...)
- delete(f.pending, tid)
- }
- }
- sort.Sort(m)
- f.ids = pgids(f.ids).merge(m)
-}
-
-// rollback removes the pages from a given pending tx.
-func (f *freelist) rollback(txid txid) {
- // Remove page ids from cache.
- for _, id := range f.pending[txid] {
- delete(f.cache, id)
- }
-
- // Remove pages from pending list.
- delete(f.pending, txid)
-}
-
-// freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
- return f.cache[pgid]
-}
-
-// read initializes the freelist from a freelist page.
-func (f *freelist) read(p *page) {
- // If the page.count is at the max uint16 value (64k) then it's considered
- // an overflow and the size of the freelist is stored as the first element.
- idx, count := 0, int(p.count)
- if count == 0xFFFF {
- idx = 1
- count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
- }
-
- // Copy the list of page ids from the freelist.
- if count == 0 {
- f.ids = nil
- } else {
- ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
- f.ids = make([]pgid, len(ids))
- copy(f.ids, ids)
-
- // Make sure they're sorted.
- sort.Sort(pgids(f.ids))
- }
-
- // Rebuild the page cache.
- f.reindex()
-}
-
-// write writes the page ids onto a freelist page. All free and pending ids are
-// saved to disk since in the event of a program crash, all pending ids will
-// become free.
-func (f *freelist) write(p *page) error {
- // Combine the old free pgids and pgids waiting on an open transaction.
- ids := f.all()
-
- // Update the header flag.
- p.flags |= freelistPageFlag
-
- // The page.count can only hold up to 64k elements so if we overflow that
- // number then we handle it by putting the size in the first element.
- if len(ids) == 0 {
- p.count = uint16(len(ids))
- } else if len(ids) < 0xFFFF {
- p.count = uint16(len(ids))
- copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
- } else {
- p.count = 0xFFFF
- ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
- copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
- }
-
- return nil
-}
-
-// reload reads the freelist from a page and filters out pending items.
-func (f *freelist) reload(p *page) {
- f.read(p)
-
- // Build a cache of only pending pages.
- pcache := make(map[pgid]bool)
- for _, pendingIDs := range f.pending {
- for _, pendingID := range pendingIDs {
- pcache[pendingID] = true
- }
- }
-
- // Check each page in the freelist and build a new available freelist
- // with any pages not in the pending lists.
- var a []pgid
- for _, id := range f.ids {
- if !pcache[id] {
- a = append(a, id)
- }
- }
- f.ids = a
-
- // Once the available list is rebuilt then rebuild the free cache so that
- // it includes the available and pending free pages.
- f.reindex()
-}
-
-// reindex rebuilds the free cache based on available and pending free lists.
-func (f *freelist) reindex() {
- f.cache = make(map[pgid]bool)
- for _, id := range f.ids {
- f.cache[id] = true
- }
- for _, pendingIDs := range f.pending {
- for _, pendingID := range pendingIDs {
- f.cache[pendingID] = true
- }
- }
-}
diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/coreos/bbolt/node.go
deleted file mode 100644
index 159318b..0000000
--- a/vendor/github.com/coreos/bbolt/node.go
+++ /dev/null
@@ -1,604 +0,0 @@
-package bolt
-
-import (
- "bytes"
- "fmt"
- "sort"
- "unsafe"
-)
-
-// node represents an in-memory, deserialized page.
-type node struct {
- bucket *Bucket
- isLeaf bool
- unbalanced bool
- spilled bool
- key []byte
- pgid pgid
- parent *node
- children nodes
- inodes inodes
-}
-
-// root returns the top-level node this node is attached to.
-func (n *node) root() *node {
- if n.parent == nil {
- return n
- }
- return n.parent.root()
-}
-
-// minKeys returns the minimum number of inodes this node should have.
-func (n *node) minKeys() int {
- if n.isLeaf {
- return 1
- }
- return 2
-}
-
-// size returns the size of the node after serialization.
-func (n *node) size() int {
- sz, elsz := pageHeaderSize, n.pageElementSize()
- for i := 0; i < len(n.inodes); i++ {
- item := &n.inodes[i]
- sz += elsz + len(item.key) + len(item.value)
- }
- return sz
-}
-
-// sizeLessThan returns true if the node is less than a given size.
-// This is an optimization to avoid calculating a large node when we only need
-// to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v int) bool {
- sz, elsz := pageHeaderSize, n.pageElementSize()
- for i := 0; i < len(n.inodes); i++ {
- item := &n.inodes[i]
- sz += elsz + len(item.key) + len(item.value)
- if sz >= v {
- return false
- }
- }
- return true
-}
-
-// pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() int {
- if n.isLeaf {
- return leafPageElementSize
- }
- return branchPageElementSize
-}
-
-// childAt returns the child node at a given index.
-func (n *node) childAt(index int) *node {
- if n.isLeaf {
- panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
- }
- return n.bucket.node(n.inodes[index].pgid, n)
-}
-
-// childIndex returns the index of a given child node.
-func (n *node) childIndex(child *node) int {
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
- return index
-}
-
-// numChildren returns the number of children.
-func (n *node) numChildren() int {
- return len(n.inodes)
-}
-
-// nextSibling returns the next node with the same parent.
-func (n *node) nextSibling() *node {
- if n.parent == nil {
- return nil
- }
- index := n.parent.childIndex(n)
- if index >= n.parent.numChildren()-1 {
- return nil
- }
- return n.parent.childAt(index + 1)
-}
-
-// prevSibling returns the previous node with the same parent.
-func (n *node) prevSibling() *node {
- if n.parent == nil {
- return nil
- }
- index := n.parent.childIndex(n)
- if index == 0 {
- return nil
- }
- return n.parent.childAt(index - 1)
-}
-
-// put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
- if pgid >= n.bucket.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
- } else if len(oldKey) <= 0 {
- panic("put: zero-length old key")
- } else if len(newKey) <= 0 {
- panic("put: zero-length new key")
- }
-
- // Find insertion index.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
-
- // Add capacity and shift nodes if we don't have an exact match and need to insert.
- exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
- if !exact {
- n.inodes = append(n.inodes, inode{})
- copy(n.inodes[index+1:], n.inodes[index:])
- }
-
- inode := &n.inodes[index]
- inode.flags = flags
- inode.key = newKey
- inode.value = value
- inode.pgid = pgid
- _assert(len(inode.key) > 0, "put: zero-length inode key")
-}
-
-// del removes a key from the node.
-func (n *node) del(key []byte) {
- // Find index of key.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
-
- // Exit if the key isn't found.
- if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
- return
- }
-
- // Delete inode from the node.
- n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
-
- // Mark the node as needing rebalancing.
- n.unbalanced = true
-}
-
-// read initializes the node from a page.
-func (n *node) read(p *page) {
- n.pgid = p.id
- n.isLeaf = ((p.flags & leafPageFlag) != 0)
- n.inodes = make(inodes, int(p.count))
-
- for i := 0; i < int(p.count); i++ {
- inode := &n.inodes[i]
- if n.isLeaf {
- elem := p.leafPageElement(uint16(i))
- inode.flags = elem.flags
- inode.key = elem.key()
- inode.value = elem.value()
- } else {
- elem := p.branchPageElement(uint16(i))
- inode.pgid = elem.pgid
- inode.key = elem.key()
- }
- _assert(len(inode.key) > 0, "read: zero-length inode key")
- }
-
- // Save first key so we can find the node in the parent when we spill.
- if len(n.inodes) > 0 {
- n.key = n.inodes[0].key
- _assert(len(n.key) > 0, "read: zero-length node key")
- } else {
- n.key = nil
- }
-}
-
-// write writes the items onto one or more pages.
-func (n *node) write(p *page) {
- // Initialize page.
- if n.isLeaf {
- p.flags |= leafPageFlag
- } else {
- p.flags |= branchPageFlag
- }
-
- if len(n.inodes) >= 0xFFFF {
- panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
- }
- p.count = uint16(len(n.inodes))
-
- // Stop here if there are no items to write.
- if p.count == 0 {
- return
- }
-
- // Loop over each item and write it to the page.
- b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
- for i, item := range n.inodes {
- _assert(len(item.key) > 0, "write: zero-length inode key")
-
- // Write the page element.
- if n.isLeaf {
- elem := p.leafPageElement(uint16(i))
- elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
- elem.flags = item.flags
- elem.ksize = uint32(len(item.key))
- elem.vsize = uint32(len(item.value))
- } else {
- elem := p.branchPageElement(uint16(i))
- elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
- elem.ksize = uint32(len(item.key))
- elem.pgid = item.pgid
- _assert(elem.pgid != p.id, "write: circular dependency occurred")
- }
-
- // If the length of key+value is larger than the max allocation size
- // then we need to reallocate the byte array pointer.
- //
- // See: https://github.com/boltdb/bolt/pull/335
- klen, vlen := len(item.key), len(item.value)
- if len(b) < klen+vlen {
- b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
- }
-
- // Write data for the element to the end of the page.
- copy(b[0:], item.key)
- b = b[klen:]
- copy(b[0:], item.value)
- b = b[vlen:]
- }
-
- // DEBUG ONLY: n.dump()
-}
-
-// split breaks up a node into multiple smaller nodes, if appropriate.
-// This should only be called from the spill() function.
-func (n *node) split(pageSize int) []*node {
- var nodes []*node
-
- node := n
- for {
- // Split node into two.
- a, b := node.splitTwo(pageSize)
- nodes = append(nodes, a)
-
- // If we can't split then exit the loop.
- if b == nil {
- break
- }
-
- // Set node to b so it gets split on the next iteration.
- node = b
- }
-
- return nodes
-}
-
-// splitTwo breaks up a node into two smaller nodes, if appropriate.
-// This should only be called from the split() function.
-func (n *node) splitTwo(pageSize int) (*node, *node) {
- // Ignore the split if the page doesn't have at least enough nodes for
- // two pages or if the nodes can fit in a single page.
- if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
- return n, nil
- }
-
- // Determine the threshold before starting a new node.
- var fillPercent = n.bucket.FillPercent
- if fillPercent < minFillPercent {
- fillPercent = minFillPercent
- } else if fillPercent > maxFillPercent {
- fillPercent = maxFillPercent
- }
- threshold := int(float64(pageSize) * fillPercent)
-
- // Determine split position and sizes of the two pages.
- splitIndex, _ := n.splitIndex(threshold)
-
- // Split node into two separate nodes.
- // If there's no parent then we'll need to create one.
- if n.parent == nil {
- n.parent = &node{bucket: n.bucket, children: []*node{n}}
- }
-
- // Create a new node and add it to the parent.
- next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
- n.parent.children = append(n.parent.children, next)
-
- // Split inodes across two nodes.
- next.inodes = n.inodes[splitIndex:]
- n.inodes = n.inodes[:splitIndex]
-
- // Update the statistics.
- n.bucket.tx.stats.Split++
-
- return n, next
-}
-
-// splitIndex finds the position where a page will fill a given threshold.
-// It returns the index as well as the size of the first page.
-// This should only be called from split().
-func (n *node) splitIndex(threshold int) (index, sz int) {
- sz = pageHeaderSize
-
- // Loop until we only have the minimum number of keys required for the second page.
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
- index = i
- inode := n.inodes[i]
- elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
-
- // If we have at least the minimum number of keys and adding another
- // node would put us over the threshold then exit and return.
- if i >= minKeysPerPage && sz+elsize > threshold {
- break
- }
-
- // Add the element size to the total size.
- sz += elsize
- }
-
- return
-}
-
-// spill writes the nodes to dirty pages and splits nodes as it goes.
-// Returns an error if dirty pages cannot be allocated.
-func (n *node) spill() error {
- var tx = n.bucket.tx
- if n.spilled {
- return nil
- }
-
- // Spill child nodes first. Child nodes can materialize sibling nodes in
- // the case of split-merge so we cannot use a range loop. We have to check
- // the children size on every loop iteration.
- sort.Sort(n.children)
- for i := 0; i < len(n.children); i++ {
- if err := n.children[i].spill(); err != nil {
- return err
- }
- }
-
- // We no longer need the child list because it's only used for spill tracking.
- n.children = nil
-
- // Split nodes into appropriate sizes. The first node will always be n.
- var nodes = n.split(tx.db.pageSize)
- for _, node := range nodes {
- // Add node's page to the freelist if it's not new.
- if node.pgid > 0 {
- tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
- node.pgid = 0
- }
-
- // Allocate contiguous space for the node.
- p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
- if err != nil {
- return err
- }
-
- // Write the node.
- if p.id >= tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
- }
- node.pgid = p.id
- node.write(p)
- node.spilled = true
-
- // Insert into parent inodes.
- if node.parent != nil {
- var key = node.key
- if key == nil {
- key = node.inodes[0].key
- }
-
- node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
- node.key = node.inodes[0].key
- _assert(len(node.key) > 0, "spill: zero-length node key")
- }
-
- // Update the statistics.
- tx.stats.Spill++
- }
-
- // If the root node split and created a new root then we need to spill that
- // as well. We'll clear out the children to make sure it doesn't try to respill.
- if n.parent != nil && n.parent.pgid == 0 {
- n.children = nil
- return n.parent.spill()
- }
-
- return nil
-}
-
-// rebalance attempts to combine the node with sibling nodes if the node fill
-// size is below a threshold or if there are not enough keys.
-func (n *node) rebalance() {
- if !n.unbalanced {
- return
- }
- n.unbalanced = false
-
- // Update statistics.
- n.bucket.tx.stats.Rebalance++
-
- // Ignore if node is above threshold (25%) and has enough keys.
- var threshold = n.bucket.tx.db.pageSize / 4
- if n.size() > threshold && len(n.inodes) > n.minKeys() {
- return
- }
-
- // Root node has special handling.
- if n.parent == nil {
- // If root node is a branch and only has one node then collapse it.
- if !n.isLeaf && len(n.inodes) == 1 {
- // Move root's child up.
- child := n.bucket.node(n.inodes[0].pgid, n)
- n.isLeaf = child.isLeaf
- n.inodes = child.inodes[:]
- n.children = child.children
-
- // Reparent all child nodes being moved.
- for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent = n
- }
- }
-
- // Remove old child.
- child.parent = nil
- delete(n.bucket.nodes, child.pgid)
- child.free()
- }
-
- return
- }
-
- // If node has no keys then just remove it.
- if n.numChildren() == 0 {
- n.parent.del(n.key)
- n.parent.removeChild(n)
- delete(n.bucket.nodes, n.pgid)
- n.free()
- n.parent.rebalance()
- return
- }
-
- _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
-
- // Destination node is right sibling if idx == 0, otherwise left sibling.
- var target *node
- var useNextSibling = (n.parent.childIndex(n) == 0)
- if useNextSibling {
- target = n.nextSibling()
- } else {
- target = n.prevSibling()
- }
-
- // If both this node and the target node are too small then merge them.
- if useNextSibling {
- // Reparent all child nodes being moved.
- for _, inode := range target.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent.removeChild(child)
- child.parent = n
- child.parent.children = append(child.parent.children, child)
- }
- }
-
- // Copy over inodes from target and remove target.
- n.inodes = append(n.inodes, target.inodes...)
- n.parent.del(target.key)
- n.parent.removeChild(target)
- delete(n.bucket.nodes, target.pgid)
- target.free()
- } else {
- // Reparent all child nodes being moved.
- for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent.removeChild(child)
- child.parent = target
- child.parent.children = append(child.parent.children, child)
- }
- }
-
- // Copy over inodes to target and remove node.
- target.inodes = append(target.inodes, n.inodes...)
- n.parent.del(n.key)
- n.parent.removeChild(n)
- delete(n.bucket.nodes, n.pgid)
- n.free()
- }
-
- // Either this node or the target node was deleted from the parent so rebalance it.
- n.parent.rebalance()
-}
-
-// removeChild removes a node from the list of in-memory children.
-// This does not affect the inodes.
-func (n *node) removeChild(target *node) {
- for i, child := range n.children {
- if child == target {
- n.children = append(n.children[:i], n.children[i+1:]...)
- return
- }
- }
-}
-
-// dereference causes the node to copy all its inode key/value references to heap memory.
-// This is required when the mmap is reallocated so inodes are not pointing to stale data.
-func (n *node) dereference() {
- if n.key != nil {
- key := make([]byte, len(n.key))
- copy(key, n.key)
- n.key = key
- _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
- }
-
- for i := range n.inodes {
- inode := &n.inodes[i]
-
- key := make([]byte, len(inode.key))
- copy(key, inode.key)
- inode.key = key
- _assert(len(inode.key) > 0, "dereference: zero-length inode key")
-
- value := make([]byte, len(inode.value))
- copy(value, inode.value)
- inode.value = value
- }
-
- // Recursively dereference children.
- for _, child := range n.children {
- child.dereference()
- }
-
- // Update statistics.
- n.bucket.tx.stats.NodeDeref++
-}
-
-// free adds the node's underlying page to the freelist.
-func (n *node) free() {
- if n.pgid != 0 {
- n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
- n.pgid = 0
- }
-}
-
-// dump writes the contents of the node to STDERR for debugging purposes.
-/*
-func (n *node) dump() {
- // Write node header.
- var typ = "branch"
- if n.isLeaf {
- typ = "leaf"
- }
- warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
-
- // Write out abbreviated version of each item.
- for _, item := range n.inodes {
- if n.isLeaf {
- if item.flags&bucketLeafFlag != 0 {
- bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
- warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
- } else {
- warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
- }
- } else {
- warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
- }
- }
- warn("")
-}
-*/
-
-type nodes []*node
-
-func (s nodes) Len() int { return len(s) }
-func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
-
-// inode represents an internal node inside of a node.
-// It can be used to point to elements in a page or point
-// to an element which hasn't been added to a page yet.
-type inode struct {
- flags uint32
- pgid pgid
- key []byte
- value []byte
-}
-
-type inodes []inode
diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/github.com/coreos/bbolt/page.go
deleted file mode 100644
index 7651a6b..0000000
--- a/vendor/github.com/coreos/bbolt/page.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "os"
- "sort"
- "unsafe"
-)
-
-const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
-
-const minKeysPerPage = 2
-
-const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
-const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
-
-const (
- branchPageFlag = 0x01
- leafPageFlag = 0x02
- metaPageFlag = 0x04
- freelistPageFlag = 0x10
-)
-
-const (
- bucketLeafFlag = 0x01
-)
-
-type pgid uint64
-
-type page struct {
- id pgid
- flags uint16
- count uint16
- overflow uint32
- ptr uintptr
-}
-
-// typ returns a human readable page type string used for debugging.
-func (p *page) typ() string {
- if (p.flags & branchPageFlag) != 0 {
- return "branch"
- } else if (p.flags & leafPageFlag) != 0 {
- return "leaf"
- } else if (p.flags & metaPageFlag) != 0 {
- return "meta"
- } else if (p.flags & freelistPageFlag) != 0 {
- return "freelist"
- }
- return fmt.Sprintf("unknown<%02x>", p.flags)
-}
-
-// meta returns a pointer to the metadata section of the page.
-func (p *page) meta() *meta {
- return (*meta)(unsafe.Pointer(&p.ptr))
-}
-
-// leafPageElement retrieves the leaf node by index
-func (p *page) leafPageElement(index uint16) *leafPageElement {
- n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
- return n
-}
-
-// leafPageElements retrieves a list of leaf nodes.
-func (p *page) leafPageElements() []leafPageElement {
- if p.count == 0 {
- return nil
- }
- return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
-}
-
-// branchPageElement retrieves the branch node by index
-func (p *page) branchPageElement(index uint16) *branchPageElement {
- return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
-}
-
-// branchPageElements retrieves a list of branch nodes.
-func (p *page) branchPageElements() []branchPageElement {
- if p.count == 0 {
- return nil
- }
- return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
-}
-
-// hexdump writes n bytes of the page to STDERR as hex output.
-func (p *page) hexdump(n int) {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
- fmt.Fprintf(os.Stderr, "%x\n", buf)
-}
-
-type pages []*page
-
-func (s pages) Len() int { return len(s) }
-func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
-
-// branchPageElement represents a node on a branch page.
-type branchPageElement struct {
- pos uint32
- ksize uint32
- pgid pgid
-}
-
-// key returns a byte slice of the node key.
-func (n *branchPageElement) key() []byte {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
- return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
-}
-
-// leafPageElement represents a node on a leaf page.
-type leafPageElement struct {
- flags uint32
- pos uint32
- ksize uint32
- vsize uint32
-}
-
-// key returns a byte slice of the node key.
-func (n *leafPageElement) key() []byte {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
- return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
-}
-
-// value returns a byte slice of the node value.
-func (n *leafPageElement) value() []byte {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
- return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
-}
-
-// PageInfo represents human readable information about a page.
-type PageInfo struct {
- ID int
- Type string
- Count int
- OverflowCount int
-}
-
-type pgids []pgid
-
-func (s pgids) Len() int { return len(s) }
-func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
-
-// merge returns the sorted union of a and b.
-func (a pgids) merge(b pgids) pgids {
- // Return the opposite slice if one is nil.
- if len(a) == 0 {
- return b
- } else if len(b) == 0 {
- return a
- }
-
- // Create a list to hold all elements from both lists.
- merged := make(pgids, 0, len(a)+len(b))
-
- // Assign lead to the slice with a lower starting value, follow to the higher value.
- lead, follow := a, b
- if b[0] < a[0] {
- lead, follow = b, a
- }
-
- // Continue while there are elements in the lead.
- for len(lead) > 0 {
- // Merge largest prefix of lead that is ahead of follow[0].
- n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
- merged = append(merged, lead[:n]...)
- if n >= len(lead) {
- break
- }
-
- // Swap lead and follow.
- lead, follow = follow, lead[n:]
- }
-
- // Append what's left in follow.
- merged = append(merged, follow...)
-
- return merged
-}
diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go
deleted file mode 100644
index 1cfb4cd..0000000
--- a/vendor/github.com/coreos/bbolt/tx.go
+++ /dev/null
@@ -1,682 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "io"
- "os"
- "sort"
- "strings"
- "time"
- "unsafe"
-)
-
-// txid represents the internal transaction identifier.
-type txid uint64
-
-// Tx represents a read-only or read/write transaction on the database.
-// Read-only transactions can be used for retrieving values for keys and creating cursors.
-// Read/write transactions can create and remove buckets and create and remove keys.
-//
-// IMPORTANT: You must commit or rollback transactions when you are done with
-// them. Pages can not be reclaimed by the writer until no more transactions
-// are using them. A long running read transaction can cause the database to
-// quickly grow.
-type Tx struct {
- writable bool
- managed bool
- db *DB
- meta *meta
- root Bucket
- pages map[pgid]*page
- stats TxStats
- commitHandlers []func()
-
- // WriteFlag specifies the flag for write-related methods like WriteTo().
- // Tx opens the database file with the specified flag to copy the data.
- //
- // By default, the flag is unset, which works well for mostly in-memory
- // workloads. For databases that are much larger than available RAM,
- // set the flag to syscall.O_DIRECT to avoid trashing the page cache.
- WriteFlag int
-}
-
-// init initializes the transaction.
-func (tx *Tx) init(db *DB) {
- tx.db = db
- tx.pages = nil
-
- // Copy the meta page since it can be changed by the writer.
- tx.meta = &meta{}
- db.meta().copy(tx.meta)
-
- // Copy over the root bucket.
- tx.root = newBucket(tx)
- tx.root.bucket = &bucket{}
- *tx.root.bucket = tx.meta.root
-
- // Increment the transaction id and add a page cache for writable transactions.
- if tx.writable {
- tx.pages = make(map[pgid]*page)
- tx.meta.txid += txid(1)
- }
-}
-
-// ID returns the transaction id.
-func (tx *Tx) ID() int {
- return int(tx.meta.txid)
-}
-
-// DB returns a reference to the database that created the transaction.
-func (tx *Tx) DB() *DB {
- return tx.db
-}
-
-// Size returns current database size in bytes as seen by this transaction.
-func (tx *Tx) Size() int64 {
- return int64(tx.meta.pgid) * int64(tx.db.pageSize)
-}
-
-// Writable returns whether the transaction can perform write operations.
-func (tx *Tx) Writable() bool {
- return tx.writable
-}
-
-// Cursor creates a cursor associated with the root bucket.
-// All items in the cursor will return a nil value because all root bucket keys point to buckets.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (tx *Tx) Cursor() *Cursor {
- return tx.root.Cursor()
-}
-
-// Stats retrieves a copy of the current transaction statistics.
-func (tx *Tx) Stats() TxStats {
- return tx.stats
-}
-
-// Bucket retrieves a bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) Bucket(name []byte) *Bucket {
- return tx.root.Bucket(name)
-}
-
-// CreateBucket creates a new bucket.
-// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
- return tx.root.CreateBucket(name)
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
- return tx.root.CreateBucketIfNotExists(name)
-}
-
-// DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
-func (tx *Tx) DeleteBucket(name []byte) error {
- return tx.root.DeleteBucket(name)
-}
-
-// ForEach executes a function for each bucket in the root.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
-func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
- return tx.root.ForEach(func(k, v []byte) error {
- if err := fn(k, tx.root.Bucket(k)); err != nil {
- return err
- }
- return nil
- })
-}
-
-// OnCommit adds a handler function to be executed after the transaction successfully commits.
-func (tx *Tx) OnCommit(fn func()) {
- tx.commitHandlers = append(tx.commitHandlers, fn)
-}
-
-// Commit writes all changes to disk and updates the meta page.
-// Returns an error if a disk write error occurs, or if Commit is
-// called on a read-only transaction.
-func (tx *Tx) Commit() error {
- _assert(!tx.managed, "managed tx commit not allowed")
- if tx.db == nil {
- return ErrTxClosed
- } else if !tx.writable {
- return ErrTxNotWritable
- }
-
- // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
-
- // Rebalance nodes which have had deletions.
- var startTime = time.Now()
- tx.root.rebalance()
- if tx.stats.Rebalance > 0 {
- tx.stats.RebalanceTime += time.Since(startTime)
- }
-
- // spill data onto dirty pages.
- startTime = time.Now()
- if err := tx.root.spill(); err != nil {
- tx.rollback()
- return err
- }
- tx.stats.SpillTime += time.Since(startTime)
-
- // Free the old root bucket.
- tx.meta.root.root = tx.root.root
-
- opgid := tx.meta.pgid
-
- // Free the freelist and allocate new pages for it. This will overestimate
- // the size of the freelist but not underestimate the size (which would be bad).
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
- p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
- if err != nil {
- tx.rollback()
- return err
- }
- if err := tx.db.freelist.write(p); err != nil {
- tx.rollback()
- return err
- }
- tx.meta.freelist = p.id
-
- // If the high water mark has moved up then attempt to grow the database.
- if tx.meta.pgid > opgid {
- if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
- tx.rollback()
- return err
- }
- }
-
- // Write dirty pages to disk.
- startTime = time.Now()
- if err := tx.write(); err != nil {
- tx.rollback()
- return err
- }
-
- // If strict mode is enabled then perform a consistency check.
- // Only the first consistency error is reported in the panic.
- if tx.db.StrictMode {
- ch := tx.Check()
- var errs []string
- for {
- err, ok := <-ch
- if !ok {
- break
- }
- errs = append(errs, err.Error())
- }
- if len(errs) > 0 {
- panic("check fail: " + strings.Join(errs, "\n"))
- }
- }
-
- // Write meta to disk.
- if err := tx.writeMeta(); err != nil {
- tx.rollback()
- return err
- }
- tx.stats.WriteTime += time.Since(startTime)
-
- // Finalize the transaction.
- tx.close()
-
- // Execute commit handlers now that the locks have been removed.
- for _, fn := range tx.commitHandlers {
- fn()
- }
-
- return nil
-}
-
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
-func (tx *Tx) Rollback() error {
- _assert(!tx.managed, "managed tx rollback not allowed")
- if tx.db == nil {
- return ErrTxClosed
- }
- tx.rollback()
- return nil
-}
-
-func (tx *Tx) rollback() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- tx.db.freelist.rollback(tx.meta.txid)
- tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
- }
- tx.close()
-}
-
-func (tx *Tx) close() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- // Grab freelist stats.
- var freelistFreeN = tx.db.freelist.free_count()
- var freelistPendingN = tx.db.freelist.pending_count()
- var freelistAlloc = tx.db.freelist.size()
-
- // Remove transaction ref & writer lock.
- tx.db.rwtx = nil
- tx.db.rwlock.Unlock()
-
- // Merge statistics.
- tx.db.statlock.Lock()
- tx.db.stats.FreePageN = freelistFreeN
- tx.db.stats.PendingPageN = freelistPendingN
- tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
- tx.db.stats.FreelistInuse = freelistAlloc
- tx.db.stats.TxStats.add(&tx.stats)
- tx.db.statlock.Unlock()
- } else {
- tx.db.removeTx(tx)
- }
-
- // Clear all references.
- tx.db = nil
- tx.meta = nil
- tx.root = Bucket{tx: tx}
- tx.pages = nil
-}
-
-// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility. Use WriteTo() instead.
-func (tx *Tx) Copy(w io.Writer) error {
- _, err := tx.WriteTo(w)
- return err
-}
-
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
- // Attempt to open reader with WriteFlag
- f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
- if err != nil {
- return 0, err
- }
- defer func() { _ = f.Close() }()
-
- // Generate a meta page. We use the same page data for both meta pages.
- buf := make([]byte, tx.db.pageSize)
- page := (*page)(unsafe.Pointer(&buf[0]))
- page.flags = metaPageFlag
- *page.meta() = *tx.meta
-
- // Write meta 0.
- page.id = 0
- page.meta().checksum = page.meta().sum64()
- nn, err := w.Write(buf)
- n += int64(nn)
- if err != nil {
- return n, fmt.Errorf("meta 0 copy: %s", err)
- }
-
- // Write meta 1 with a lower transaction id.
- page.id = 1
- page.meta().txid -= 1
- page.meta().checksum = page.meta().sum64()
- nn, err = w.Write(buf)
- n += int64(nn)
- if err != nil {
- return n, fmt.Errorf("meta 1 copy: %s", err)
- }
-
- // Move past the meta pages in the file.
- if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
- return n, fmt.Errorf("seek: %s", err)
- }
-
- // Copy data pages.
- wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
- n += wn
- if err != nil {
- return n, err
- }
-
- return n, f.Close()
-}
-
-// CopyFile copies the entire database to file at the given path.
-// A reader transaction is maintained during the copy so it is safe to continue
-// using the database while a copy is in progress.
-func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
- f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
- if err != nil {
- return err
- }
-
- err = tx.Copy(f)
- if err != nil {
- _ = f.Close()
- return err
- }
- return f.Close()
-}
-
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching. This overhead can be removed if running on a read-only
-// transaction; however, it is not safe to execute other writer transactions at
-// the same time.
-func (tx *Tx) Check() <-chan error {
- ch := make(chan error)
- go tx.check(ch)
- return ch
-}
-
-func (tx *Tx) check(ch chan error) {
- // Check if any pages are double freed.
- freed := make(map[pgid]bool)
- for _, id := range tx.db.freelist.all() {
- if freed[id] {
- ch <- fmt.Errorf("page %d: already freed", id)
- }
- freed[id] = true
- }
-
- // Track every reachable page.
- reachable := make(map[pgid]*page)
- reachable[0] = tx.page(0) // meta0
- reachable[1] = tx.page(1) // meta1
- for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
- reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
- }
-
- // Recursively check buckets.
- tx.checkBucket(&tx.root, reachable, freed, ch)
-
- // Ensure all pages below high water mark are either reachable or freed.
- for i := pgid(0); i < tx.meta.pgid; i++ {
- _, isReachable := reachable[i]
- if !isReachable && !freed[i] {
- ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
- }
- }
-
- // Close the channel to signal completion.
- close(ch)
-}
-
-func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
- // Ignore inline buckets.
- if b.root == 0 {
- return
- }
-
- // Check every page used by this bucket.
- b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
- if p.id > tx.meta.pgid {
- ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
- }
-
- // Ensure each page is only referenced once.
- for i := pgid(0); i <= pgid(p.overflow); i++ {
- var id = p.id + i
- if _, ok := reachable[id]; ok {
- ch <- fmt.Errorf("page %d: multiple references", int(id))
- }
- reachable[id] = p
- }
-
- // We should only encounter un-freed leaf and branch pages.
- if freed[p.id] {
- ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
- } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
- ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
- }
- })
-
- // Check each bucket within this bucket.
- _ = b.ForEach(func(k, v []byte) error {
- if child := b.Bucket(k); child != nil {
- tx.checkBucket(child, reachable, freed, ch)
- }
- return nil
- })
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (tx *Tx) allocate(count int) (*page, error) {
- p, err := tx.db.allocate(count)
- if err != nil {
- return nil, err
- }
-
- // Save to our page cache.
- tx.pages[p.id] = p
-
- // Update statistics.
- tx.stats.PageCount++
- tx.stats.PageAlloc += count * tx.db.pageSize
-
- return p, nil
-}
-
-// write writes any dirty pages to disk.
-func (tx *Tx) write() error {
- // Sort pages by id.
- pages := make(pages, 0, len(tx.pages))
- for _, p := range tx.pages {
- pages = append(pages, p)
- }
- // Clear out page cache early.
- tx.pages = make(map[pgid]*page)
- sort.Sort(pages)
-
- // Write pages to disk in order.
- for _, p := range pages {
- size := (int(p.overflow) + 1) * tx.db.pageSize
- offset := int64(p.id) * int64(tx.db.pageSize)
-
- // Write out page in "max allocation" sized chunks.
- ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
- for {
- // Limit our write to our max allocation size.
- sz := size
- if sz > maxAllocSize-1 {
- sz = maxAllocSize - 1
- }
-
- // Write chunk to disk.
- buf := ptr[:sz]
- if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
- return err
- }
-
- // Update statistics.
- tx.stats.Write++
-
- // Exit inner for loop if we've written all the chunks.
- size -= sz
- if size == 0 {
- break
- }
-
- // Otherwise move offset forward and move pointer to next chunk.
- offset += int64(sz)
- ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
- }
- }
-
- // Ignore file sync if flag is set on DB.
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
- }
- }
-
- // Put small pages back to page pool.
- for _, p := range pages {
- // Ignore page sizes over 1 page.
- // These are allocated using make() instead of the page pool.
- if int(p.overflow) != 0 {
- continue
- }
-
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
-
- // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
- for i := range buf {
- buf[i] = 0
- }
- tx.db.pagePool.Put(buf)
- }
-
- return nil
-}
-
-// writeMeta writes the meta to the disk.
-func (tx *Tx) writeMeta() error {
- // Create a temporary buffer for the meta page.
- buf := make([]byte, tx.db.pageSize)
- p := tx.db.pageInBuffer(buf, 0)
- tx.meta.write(p)
-
- // Write the meta page to file.
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
- return err
- }
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
- }
- }
-
- // Update statistics.
- tx.stats.Write++
-
- return nil
-}
-
-// page returns a reference to the page with a given id.
-// If the page has been written to, then a temporary buffered page is returned.
-func (tx *Tx) page(id pgid) *page {
- // Check the dirty pages first.
- if tx.pages != nil {
- if p, ok := tx.pages[id]; ok {
- return p
- }
- }
-
- // Otherwise return directly from the mmap.
- return tx.db.page(id)
-}
-
-// forEachPage iterates over every page within a given page and executes a function.
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
- p := tx.page(pgid)
-
- // Execute function.
- fn(p, depth)
-
- // Recursively loop over children.
- if (p.flags & branchPageFlag) != 0 {
- for i := 0; i < int(p.count); i++ {
- elem := p.branchPageElement(uint16(i))
- tx.forEachPage(elem.pgid, depth+1, fn)
- }
- }
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
- if tx.db == nil {
- return nil, ErrTxClosed
- } else if pgid(id) >= tx.meta.pgid {
- return nil, nil
- }
-
- // Build the page info.
- p := tx.db.page(pgid(id))
- info := &PageInfo{
- ID: id,
- Count: int(p.count),
- OverflowCount: int(p.overflow),
- }
-
- // Determine the type (or if it's free).
- if tx.db.freelist.freed(pgid(id)) {
- info.Type = "free"
- } else {
- info.Type = p.typ()
- }
-
- return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
- // Page statistics.
- PageCount int // number of page allocations
- PageAlloc int // total bytes allocated
-
- // Cursor statistics.
- CursorCount int // number of cursors created
-
- // Node statistics
- NodeCount int // number of node allocations
- NodeDeref int // number of node dereferences
-
- // Rebalance statistics.
- Rebalance int // number of node rebalances
- RebalanceTime time.Duration // total time spent rebalancing
-
- // Split/Spill statistics.
- Split int // number of nodes split
- Spill int // number of nodes spilled
- SpillTime time.Duration // total time spent spilling
-
- // Write statistics.
- Write int // number of writes performed
- WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
- s.PageCount += other.PageCount
- s.PageAlloc += other.PageAlloc
- s.CursorCount += other.CursorCount
- s.NodeCount += other.NodeCount
- s.NodeDeref += other.NodeDeref
- s.Rebalance += other.Rebalance
- s.RebalanceTime += other.RebalanceTime
- s.Split += other.Split
- s.Spill += other.Spill
- s.SpillTime += other.SpillTime
- s.Write += other.Write
- s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
- var diff TxStats
- diff.PageCount = s.PageCount - other.PageCount
- diff.PageAlloc = s.PageAlloc - other.PageAlloc
- diff.CursorCount = s.CursorCount - other.CursorCount
- diff.NodeCount = s.NodeCount - other.NodeCount
- diff.NodeDeref = s.NodeDeref - other.NodeDeref
- diff.Rebalance = s.Rebalance - other.Rebalance
- diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
- diff.Split = s.Split - other.Split
- diff.Spill = s.Spill - other.Spill
- diff.SpillTime = s.SpillTime - other.SpillTime
- diff.Write = s.Write - other.Write
- diff.WriteTime = s.WriteTime - other.WriteTime
- return diff
-}
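The tx.go removed above documents that every transaction must end in Commit or Rollback so the writer can reclaim pages. For reference, a minimal usage sketch of that discipline against the github.com/coreos/bbolt API being dropped from the vendor tree might look like the following; the database path, bucket, and key/value names are illustrative only.
```go
package main

import (
	"log"

	bolt "github.com/coreos/bbolt"
)

func main() {
	// Open (or create) the database file; 0600 is the file mode.
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Begin a writable transaction. As the removed documentation notes,
	// it must end in Commit or Rollback so its pages can be reclaimed.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	// Safe even after a successful Commit: Rollback then simply returns ErrTxClosed.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("config"))
	if err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}

	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```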
diff --git a/vendor/github.com/coreos/etcd/.dockerignore b/vendor/github.com/coreos/etcd/.dockerignore
deleted file mode 100644
index 6b8710a..0000000
--- a/vendor/github.com/coreos/etcd/.dockerignore
+++ /dev/null
@@ -1 +0,0 @@
-.git
diff --git a/vendor/github.com/coreos/etcd/.gitignore b/vendor/github.com/coreos/etcd/.gitignore
deleted file mode 100644
index b055a98..0000000
--- a/vendor/github.com/coreos/etcd/.gitignore
+++ /dev/null
@@ -1,21 +0,0 @@
-/agent-*
-/coverage
-/covdir
-/docs
-/vendor
-/gopath
-/gopath.proto
-/go-bindata
-/release
-/machine*
-/bin
-.vagrant
-*.etcd
-*.log
-/etcd
-*.swp
-/hack/insta-discovery/.env
-*.test
-hack/tls-setup/certs
-.idea
-*.bak
diff --git a/vendor/github.com/coreos/etcd/.godir b/vendor/github.com/coreos/etcd/.godir
deleted file mode 100644
index 00ff6aa..0000000
--- a/vendor/github.com/coreos/etcd/.godir
+++ /dev/null
@@ -1 +0,0 @@
-github.com/coreos/etcd
diff --git a/vendor/github.com/coreos/etcd/.header b/vendor/github.com/coreos/etcd/.header
deleted file mode 100644
index 0446af6..0000000
--- a/vendor/github.com/coreos/etcd/.header
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/.travis.yml b/vendor/github.com/coreos/etcd/.travis.yml
deleted file mode 100644
index bbe4af7..0000000
--- a/vendor/github.com/coreos/etcd/.travis.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-language: go
-go_import_path: github.com/coreos/etcd
-
-sudo: required
-
-services: docker
-
-go:
-- 1.10.7
-
-notifications:
- on_success: never
- on_failure: never
-
-env:
- matrix:
- - TARGET=linux-amd64-integration
- - TARGET=linux-amd64-functional
- - TARGET=linux-amd64-unit
- - TARGET=all-build
- - TARGET=linux-386-unit
-
-matrix:
- fast_finish: true
- allow_failures:
- - go: 1.10.7
- env: TARGET=linux-386-unit
- exclude:
- - go: tip
- env: TARGET=linux-386-unit
-
-before_install:
-- if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi
-
-install:
-- pushd cmd/etcd && go get -t -v ./... && popd
-
-script:
- - echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
- - >
- case "${TARGET}" in
- linux-amd64-integration)
- docker run --rm \
- --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
- /bin/bash -c "GOARCH=amd64 PASSES='integration' ./test"
- ;;
- linux-amd64-functional)
- docker run --rm \
- --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
- /bin/bash -c "./build && GOARCH=amd64 PASSES='functional' ./test"
- ;;
- linux-amd64-unit)
- docker run --rm \
- --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
- /bin/bash -c "GOARCH=amd64 PASSES='unit' ./test"
- ;;
- all-build)
- docker run --rm \
- --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
- /bin/bash -c "GOARCH=amd64 PASSES='build' ./test \
- && GOARCH=386 PASSES='build' ./test \
- && GO_BUILD_FLAGS='-v' GOOS=darwin GOARCH=amd64 ./build \
- && GO_BUILD_FLAGS='-v' GOOS=windows GOARCH=amd64 ./build \
- && GO_BUILD_FLAGS='-v' GOARCH=arm ./build \
- && GO_BUILD_FLAGS='-v' GOARCH=arm64 ./build \
- && GO_BUILD_FLAGS='-v' GOARCH=ppc64le ./build"
- ;;
- linux-386-unit)
- docker run --rm \
- --volume=`pwd`:/go/src/github.com/coreos/etcd gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION} \
- /bin/bash -c "GOARCH=386 PASSES='unit' ./test"
- ;;
- esac
diff --git a/vendor/github.com/coreos/etcd/.words b/vendor/github.com/coreos/etcd/.words
deleted file mode 100644
index 31fffef..0000000
--- a/vendor/github.com/coreos/etcd/.words
+++ /dev/null
@@ -1,44 +0,0 @@
-DefaultMaxRequestBytes
-ErrCodeEnhanceYourCalm
-ErrTimeout
-GoAway
-KeepAlive
-Keepalive
-MiB
-ResourceExhausted
-RPC
-RPCs
-TODO
-backoff
-blackhole
-blackholed
-cancelable
-cancelation
-cluster_proxy
-defragment
-defragmenting
-etcd
-gRPC
-goroutine
-goroutines
-healthcheck
-iff
-inflight
-keepalive
-keepalives
-keyspace
-linearization
-localhost
-mutex
-prefetching
-protobuf
-prometheus
-rafthttp
-repin
-serializable
-teardown
-too_many_pings
-uncontended
-unprefixed
-unlisting
-
diff --git a/vendor/github.com/coreos/etcd/CHANGELOG.md b/vendor/github.com/coreos/etcd/CHANGELOG.md
deleted file mode 100644
index 603e501..0000000
--- a/vendor/github.com/coreos/etcd/CHANGELOG.md
+++ /dev/null
@@ -1,746 +0,0 @@
-## [v3.3.0](https://github.com/coreos/etcd/releases/tag/v3.3.0)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.0...v3.3.0) and [v3.3 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_3.md) for any breaking changes.
-
-### Improved
-
-- Use [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to replace [`boltdb/bolt`](https://github.com/boltdb/bolt#project-status).
- - Fix [etcd database size grows until `mvcc: database space exceeded`](https://github.com/coreos/etcd/issues/8009).
-- [Reduce memory allocation](https://github.com/coreos/etcd/pull/8428) on [Range operations](https://github.com/coreos/etcd/pull/8475).
-- [Rate limit](https://github.com/coreos/etcd/pull/8099) and [randomize](https://github.com/coreos/etcd/pull/8101) lease revoke on restart or leader elections.
- - Prevent [spikes in Raft proposal rate](https://github.com/coreos/etcd/issues/8096).
-- Support `clientv3` balancer failover under [network faults/partitions](https://github.com/coreos/etcd/issues/8711).
-- Better warning on [mismatched `--initial-cluster`](https://github.com/coreos/etcd/pull/8083) flag.
-
-### Changed(Breaking Changes)
-
-- Require [Go 1.9+](https://github.com/coreos/etcd/issues/6174).
- - Compile with *Go 1.9.2*.
- - Deprecate [`golang.org/x/net/context`](https://github.com/coreos/etcd/pull/8511).
-- Require [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) [**`v1.7.4`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.4) or [**`v1.7.5+`**](https://github.com/grpc/grpc-go/releases/tag/v1.7.5):
- - Deprecate [`metadata.Incoming/OutgoingContext`](https://github.com/coreos/etcd/pull/7896).
- - Deprecate `grpclog.Logger`, upgrade to [`grpclog.LoggerV2`](https://github.com/coreos/etcd/pull/8533).
- - Deprecate [`grpc.ErrClientConnTimeout`](https://github.com/coreos/etcd/pull/8505) errors in `clientv3`.
- - Use [`MaxRecvMsgSize` and `MaxSendMsgSize`](https://github.com/coreos/etcd/pull/8437) to limit message size, in etcd server.
-- Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) `v1.2.2` to `v1.3.0`.
-- Translate [gRPC status error in v3 client `Snapshot` API](https://github.com/coreos/etcd/pull/9038).
-- Upgrade [`github.com/ugorji/go/codec`](https://github.com/ugorji/go) for v2 `client`.
- - [Regenerated](https://github.com/coreos/etcd/pull/8721) v2 `client` source code with latest `ugorji/go/codec`.
-- Fix [`/health` endpoint JSON output](https://github.com/coreos/etcd/pull/8312).
-- v3 `etcdctl` [`lease timetolive LEASE_ID`](https://github.com/coreos/etcd/issues/9028) on expired lease now prints [`lease LEASE_ID already expired`](https://github.com/coreos/etcd/pull/9047).
- - <=3.2 prints `lease LEASE_ID granted with TTL(0s), remaining(-1s)`.
-
-### Added(`etcd`)
-
-- Add [`--experimental-enable-v2v3`](https://github.com/coreos/etcd/pull/8407) flag to [emulate v2 API with v3](https://github.com/coreos/etcd/issues/6925).
-- Add [`--experimental-corrupt-check-time`](https://github.com/coreos/etcd/pull/8420) flag to [raise corrupt alarm monitoring](https://github.com/coreos/etcd/issues/7125).
-- Add [`--experimental-initial-corrupt-check`](https://github.com/coreos/etcd/pull/8554) flag to [check database hash before serving client/peer traffic](https://github.com/coreos/etcd/issues/8313).
-- Add [`--max-txn-ops`](https://github.com/coreos/etcd/pull/7976) flag to [configure maximum number operations in transaction](https://github.com/coreos/etcd/issues/7826).
-- Add [`--max-request-bytes`](https://github.com/coreos/etcd/pull/7968) flag to [configure maximum client request size](https://github.com/coreos/etcd/issues/7923).
- - If not configured, it defaults to 1.5 MiB.
-- Add [`--client-crl-file`, `--peer-crl-file`](https://github.com/coreos/etcd/pull/8124) flags for [Certificate revocation list](https://github.com/coreos/etcd/issues/4034).
-- Add [`--peer-require-cn`](https://github.com/coreos/etcd/pull/8616) flag to support [CN-based auth for inter-peer connection](https://github.com/coreos/etcd/issues/8262).
-- Add [`--listen-metrics-urls`](https://github.com/coreos/etcd/pull/8242) flag for additional `/metrics` endpoints.
- - Support [additional (non) TLS `/metrics` endpoints for a TLS-enabled cluster](https://github.com/coreos/etcd/pull/8282).
- - e.g. `--listen-metrics-urls=https://localhost:2378,http://localhost:9379` to serve `/metrics` in secure port 2378 and insecure port 9379.
- - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/coreos/etcd/issues/8060).
-- Add [`--auto-compaction-mode`](https://github.com/coreos/etcd/pull/8123) flag to [support revision-based compaction](https://github.com/coreos/etcd/issues/8098).
-- Change `--auto-compaction-retention` flag to [accept string values](https://github.com/coreos/etcd/pull/8563) with [finer granularity](https://github.com/coreos/etcd/issues/8503).
-- Add [`--grpc-keepalive-min-time`, `--grpc-keepalive-interval`, `--grpc-keepalive-timeout`](https://github.com/coreos/etcd/pull/8535) flags to configure server-side keepalive policies.
-- Serve [`/health` endpoint as unhealthy](https://github.com/coreos/etcd/pull/8272) when [alarm is raised](https://github.com/coreos/etcd/issues/8207).
-- Provide [error information in `/health`](https://github.com/coreos/etcd/pull/8312).
- - e.g. `{"health":false,"errors":["NOSPACE"]}`.
-- Move [logging setup to embed package](https://github.com/coreos/etcd/pull/8810)
- - Disable gRPC server log by default.
-- Use [monotonic time in Go 1.9](https://github.com/coreos/etcd/pull/8507) for `lease` package.
-- Warn on [empty hosts in advertise URLs](https://github.com/coreos/etcd/pull/8384).
- - Address [advertise client URLs accepts empty hosts](https://github.com/coreos/etcd/issues/8379).
- - etcd `v3.4` will exit on this error.
- - e.g. `--advertise-client-urls=http://:2379`.
-- Warn on [shadowed environment variables](https://github.com/coreos/etcd/pull/8385).
- - Address [error on shadowed environment variables](https://github.com/coreos/etcd/issues/8380).
- - etcd `v3.4` will exit on this error.
-
-### Added(API)
-
-- Support [ranges in transaction comparisons](https://github.com/coreos/etcd/pull/8025) for [disconnected linearized reads](https://github.com/coreos/etcd/issues/7924).
-- Add [nested transactions](https://github.com/coreos/etcd/pull/8102) to extend [proxy use cases](https://github.com/coreos/etcd/issues/7857).
-- Add [lease comparison target in transaction](https://github.com/coreos/etcd/pull/8324).
-- Add [lease list](https://github.com/coreos/etcd/pull/8358).
-- Add [hash by revision](https://github.com/coreos/etcd/pull/8263) for [better corruption checking against boltdb](https://github.com/coreos/etcd/issues/8016).
-
-### Added(`etcd/clientv3`)
-
-- Add [health balancer](https://github.com/coreos/etcd/pull/8545) to fix [watch API hangs](https://github.com/coreos/etcd/issues/7247), improve [endpoint switch under network faults](https://github.com/coreos/etcd/issues/7941).
-- [Refactor balancer](https://github.com/coreos/etcd/pull/8840) and add [client-side keepalive pings](https://github.com/coreos/etcd/pull/8199) to handle [network partitions](https://github.com/coreos/etcd/issues/8711).
-- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/coreos/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/coreos/etcd/clientv3#Config).
- - Fix [exceeded response size limit error in client-side](https://github.com/coreos/etcd/issues/9043).
- - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099).
- - `MaxCallSendMsgSize` default value is 2 MiB, if not configured.
- - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured.
-- Accept [`Compare_LEASE`](https://github.com/coreos/etcd/pull/8324) in [`clientv3.Compare`](https://godoc.org/github.com/coreos/etcd/clientv3#Compare).
-- Add [`LeaseValue` helper](https://github.com/coreos/etcd/pull/8488) to `Cmp` `LeaseID` values in `Txn`.
-- Add [`MoveLeader`](https://github.com/coreos/etcd/pull/8153) to `Maintenance`.
-- Add [`HashKV`](https://github.com/coreos/etcd/pull/8351) to `Maintenance`.
-- Add [`Leases`](https://github.com/coreos/etcd/pull/8358) to `Lease`.
-- Add [`clientv3/ordering`](https://github.com/coreos/etcd/pull/8092) to enforce [ordering in serialized requests](https://github.com/coreos/etcd/issues/7623).
-
-### Added(v2 `etcdctl`)
-
-- Add [`backup --with-v3`](https://github.com/coreos/etcd/pull/8479) flag.
-
-### Added(v3 `etcdctl`)
-
-- Add [`--discovery-srv`](https://github.com/coreos/etcd/pull/8462) flag.
-- Add [`--keepalive-time`, `--keepalive-timeout`](https://github.com/coreos/etcd/pull/8663) flags.
-- Add [`lease list`](https://github.com/coreos/etcd/pull/8358) command.
-- Add [`lease keep-alive --once`](https://github.com/coreos/etcd/pull/8775) flag.
-- Make [`lease timetolive LEASE_ID`](https://github.com/coreos/etcd/issues/9028) on expired lease print [`lease LEASE_ID already expired`](https://github.com/coreos/etcd/pull/9047).
- - <=3.2 prints `lease LEASE_ID granted with TTL(0s), remaining(-1s)`.
-- Add [`defrag --data-dir`](https://github.com/coreos/etcd/pull/8367) flag.
-- Add [`move-leader`](https://github.com/coreos/etcd/pull/8153) command.
-- Add [`endpoint hashkv`](https://github.com/coreos/etcd/pull/8351) command.
-- Add [`endpoint --cluster`](https://github.com/coreos/etcd/pull/8143) flag, equivalent to [v2 `etcdctl cluster-health`](https://github.com/coreos/etcd/issues/8117).
-- Make `endpoint health` command terminate with [non-zero exit code on unhealthy status](https://github.com/coreos/etcd/pull/8342).
-- Add [`lock --ttl`](https://github.com/coreos/etcd/pull/8370) flag.
-- Support [`watch [key] [range_end] -- [exec-command…]`](https://github.com/coreos/etcd/pull/8919), equivalent to [v2 `etcdctl exec-watch`](https://github.com/coreos/etcd/issues/8814).
-- Enable [`clientv3.WithRequireLeader(context.Context)` for `watch`](https://github.com/coreos/etcd/pull/8672) command.
-- Print [`"del"` instead of `"delete"`](https://github.com/coreos/etcd/pull/8297) in `txn` interactive mode.
-- Print [`ETCD_INITIAL_ADVERTISE_PEER_URLS` in `member add`](https://github.com/coreos/etcd/pull/8332).
-
-### Added(metrics)
-
-- Add [`etcd --listen-metrics-urls`](https://github.com/coreos/etcd/pull/8242) flag for additional `/metrics` endpoints.
- - Useful for [bypassing critical APIs when monitoring etcd](https://github.com/coreos/etcd/issues/8060).
-- Add [`etcd_server_version`](https://github.com/coreos/etcd/pull/8960) Prometheus metric.
- - To replace [Kubernetes `etcd-version-monitor`](https://github.com/coreos/etcd/issues/8948).
-- Add [`etcd_debugging_mvcc_db_compaction_keys_total`](https://github.com/coreos/etcd/pull/8280) Prometheus metric.
-- Add [`etcd_debugging_server_lease_expired_total`](https://github.com/coreos/etcd/pull/8064) Prometheus metric.
- - To improve [lease revoke monitoring](https://github.com/coreos/etcd/issues/8050).
-- Document [Prometheus 2.0 rules](https://github.com/coreos/etcd/pull/8879).
-- Initialize gRPC server [metrics with zero values](https://github.com/coreos/etcd/pull/8878).
-
-### Added(`grpc-proxy`)
-
-- Add [`grpc-proxy start --experimental-leasing-prefix`](https://github.com/coreos/etcd/pull/8341) flag:
- - For disconnected linearized reads.
- - Based on [V system leasing](https://github.com/coreos/etcd/issues/6065).
- - See ["Disconnected consistent reads with etcd" blog post](https://coreos.com/blog/coreos-labs-disconnected-consistent-reads-with-etcd).
-- Add [`grpc-proxy start --experimental-serializable-ordering`](https://github.com/coreos/etcd/pull/8315) flag.
- - To ensure serializable reads have monotonically increasing store revisions across endpoints.
-- Add [`grpc-proxy start --metrics-addr`](https://github.com/coreos/etcd/pull/8242) flag for an additional `/metrics` endpoint.
- - Set `--metrics-addr=http://[HOST]:9379` to serve `/metrics` in insecure port 9379.
-- Serve [`/health` endpoint in grpc-proxy](https://github.com/coreos/etcd/pull/8322).
-- Add [`grpc-proxy start --debug`](https://github.com/coreos/etcd/pull/8994) flag.
-
-### Added(gRPC gateway)
-
-- Replace [gRPC gateway](https://github.com/grpc-ecosystem/grpc-gateway) endpoint with [`/v3beta`](https://github.com/coreos/etcd/pull/8880).
- - To deprecate [`/v3alpha`](https://github.com/coreos/etcd/issues/8125) in `v3.4`.
-- Support ["authorization" token](https://github.com/coreos/etcd/pull/7999).
-- Support [websocket for bi-directional streams](https://github.com/coreos/etcd/pull/8257).
- - Fix [`Watch` API with gRPC gateway](https://github.com/coreos/etcd/issues/8237).
-- Upgrade gRPC gateway to [v1.3.0](https://github.com/coreos/etcd/issues/8838).
-
-### Added(`etcd/raft`)
-
-- Add [non-voting member](https://github.com/coreos/etcd/pull/8751).
- - To implement [Raft thesis 4.2.1 Catching up new servers](https://github.com/coreos/etcd/issues/8568).
- - `Learner` node does not vote or promote itself.
-
-### Added/Fixed(Security/Auth)
-
-- Add [CRL based connection rejection](https://github.com/coreos/etcd/pull/8124) to manage [revoked certs](https://github.com/coreos/etcd/issues/4034).
-- Document [TLS authentication changes](https://github.com/coreos/etcd/pull/8895):
- - [Server accepts connections if IP matches, without checking DNS entries](https://github.com/coreos/etcd/pull/8223). For instance, if peer cert contains IP addresses and DNS names in Subject Alternative Name (SAN) field, and the remote IP address matches one of those IP addresses, server just accepts connection without further checking the DNS names.
- - [Server supports reverse-lookup on wildcard DNS `SAN`](https://github.com/coreos/etcd/pull/8281). For instance, if peer cert contains only DNS names (no IP addresses) in Subject Alternative Name (SAN) field, server first reverse-lookups the remote IP address to get a list of names mapping to that address (e.g. `nslookup IPADDR`). Then accepts the connection if those names have a matching name with peer cert's DNS names (either by exact or wildcard match). If none is matched, server forward-lookups each DNS entry in peer cert (e.g. look up `example.default.svc` when the entry is `*.example.default.svc`), and accepts connection only when the host's resolved addresses have the matching IP address with the peer's remote IP address.
-- Add [`etcd --peer-require-cn`](https://github.com/coreos/etcd/pull/8616) flag.
- - To support [CommonName(CN) based auth](https://github.com/coreos/etcd/issues/8262) for inter peer connection.
-- [Swap priority](https://github.com/coreos/etcd/pull/8594) of cert CommonName(CN) and username + password.
- - To address ["username and password specified in the request should take priority over CN in the cert"](https://github.com/coreos/etcd/issues/8584).
-- Protect [lease revoke with auth](https://github.com/coreos/etcd/pull/8031).
-- Provide user's role on [auth permission error](https://github.com/coreos/etcd/pull/8164).
-- Fix [auth store panic with disabled token](https://github.com/coreos/etcd/pull/8695).
-- Update `golang.org/x/crypto/bcrypt` (see [golang/crypto@6c586e1](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117)).
-
-### Fixed(v2)
-
-- [Fail-over v2 client](https://github.com/coreos/etcd/pull/8519) to next endpoint on [oneshot failure](https://github.com/coreos/etcd/issues/8515).
-- [Put back `/v2/machines`](https://github.com/coreos/etcd/pull/8062) endpoint for python-etcd wrapper.
-
-### Fixed(v3)
-
-- Fix [range/put/delete operation metrics](https://github.com/coreos/etcd/pull/8054) with transaction:
- - `etcd_debugging_mvcc_range_total`
- - `etcd_debugging_mvcc_put_total`
- - `etcd_debugging_mvcc_delete_total`
- - `etcd_debugging_mvcc_txn_total`
-- Fix [`etcd_debugging_mvcc_keys_total`](https://github.com/coreos/etcd/pull/8390) on restore.
-- Fix [`etcd_debugging_mvcc_db_total_size_in_bytes`](https://github.com/coreos/etcd/pull/8120) on restore.
- - Also change to [`prometheus.NewGaugeFunc`](https://github.com/coreos/etcd/pull/8150).
-- Fix [backend database in-memory index corruption](https://github.com/coreos/etcd/pull/8127) issue on restore (only 3.2.0 is affected).
-- Fix [watch restore from snapshot](https://github.com/coreos/etcd/pull/8427).
-- Fix ["put at-most-once" in `clientv3`](https://github.com/coreos/etcd/pull/8335).
-- Handle [empty key permission](https://github.com/coreos/etcd/pull/8514) in `etcdctl`.
-- [Fix server crash](https://github.com/coreos/etcd/pull/8010) on [invalid transaction request from gRPC gateway](https://github.com/coreos/etcd/issues/7889).
-- Fix [`clientv3.WatchResponse.Canceled`](https://github.com/coreos/etcd/pull/8283) on [compacted watch request](https://github.com/coreos/etcd/issues/8231).
-- Handle [WAL renaming failure on Windows](https://github.com/coreos/etcd/pull/8286).
-- Make [peer dial timeout longer](https://github.com/coreos/etcd/pull/8599).
- - See [coreos/etcd-operator#1300](https://github.com/coreos/etcd-operator/issues/1300) for more detail.
-- Make server [wait up to request time-out](https://github.com/coreos/etcd/pull/8267) with [pending RPCs](https://github.com/coreos/etcd/issues/8224).
-- Fix [`grpc.Server` panic on `GracefulStop`](https://github.com/coreos/etcd/pull/8987) with [TLS-enabled server](https://github.com/coreos/etcd/issues/8916).
-- Fix ["multiple peer URLs cannot start" issue](https://github.com/coreos/etcd/issues/8383).
-- Fix server-side auth so [concurrent auth operations do not return old revision error](https://github.com/coreos/etcd/pull/8442).
-- Fix [`concurrency/stm` `Put` with serializable snapshot](https://github.com/coreos/etcd/pull/8439).
- - Use store revision from first fetch to resolve write conflicts instead of modified revision.
-- Fix [`grpc-proxy` Snapshot API error handling](https://github.com/coreos/etcd/commit/dbd16d52fbf81e5fd806d21ff5e9148d5bf203ab).
-- Fix [`grpc-proxy` KV API `PrevKv` flag handling](https://github.com/coreos/etcd/pull/8366).
-- Fix [`grpc-proxy` KV API `KeysOnly` flag handling](https://github.com/coreos/etcd/pull/8552).
-- Upgrade [`coreos/go-systemd`](https://github.com/coreos/go-systemd/releases) to `v15` (see https://github.com/coreos/go-systemd/releases/tag/v15).
-
-### Other
-
-- Support previous two minor versions (see our [new release policy](https://github.com/coreos/etcd/pull/8805)).
-- `v3.3.x` is the last release cycle that supports `ACI`:
- - AppC was [officially suspended](https://github.com/appc/spec#-disclaimer-), as of late 2016.
- - [`acbuild`](https://github.com/containers/build#this-project-is-currently-unmaintained) is not maintained anymore.
- - `*.aci` files won't be available from etcd `v3.4` release.
-- Add container registry [`gcr.io/etcd-development/etcd`](https://gcr.io/etcd-development/etcd).
- - [quay.io/coreos/etcd](https://quay.io/coreos/etcd) is still supported as secondary.
-
-
-## [v3.2.12](https://github.com/coreos/etcd/releases/tag/v3.2.12) (2017-12-20)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.11...v3.2.12) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Fix [error message of `Revision` compactor](https://github.com/coreos/etcd/pull/8999) in server-side.
-
-### Added(`etcd/clientv3`,`etcdctl/v3`)
-
-- Add [`MaxCallSendMsgSize` and `MaxCallRecvMsgSize`](https://github.com/coreos/etcd/pull/9047) fields to [`clientv3.Config`](https://godoc.org/github.com/coreos/etcd/clientv3#Config).
- - Fix [exceeded response size limit error in client-side](https://github.com/coreos/etcd/issues/9043).
- - Address [kubernetes#51099](https://github.com/kubernetes/kubernetes/issues/51099).
- - `MaxCallSendMsgSize` default value is 2 MiB, if not configured.
- - `MaxCallRecvMsgSize` default value is `math.MaxInt32`, if not configured.
-
-### Other
-
-- Pin [grpc v1.7.5](https://github.com/grpc/grpc-go/releases/tag/v1.7.5), [grpc-gateway v1.3.0](https://github.com/grpc-ecosystem/grpc-gateway/releases/tag/v1.3.0).
- - No code change, just to be explicit about recommended versions.
-
-
-## [v3.2.11](https://github.com/coreos/etcd/releases/tag/v3.2.11) (2017-12-05)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.10...v3.2.11) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Fix racy grpc-go's server handler transport `WriteStatus` call to prevent [TLS-enabled etcd server crash](https://github.com/coreos/etcd/issues/8904):
- - Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) `v1.7.3` to `v1.7.4`.
- - Add [gRPC RPC failure warnings](https://github.com/coreos/etcd/pull/8939) to help debug such issues in the future.
-- Remove `--listen-metrics-urls` flag in monitoring document (non-released in `v3.2.x`, planned for `v3.3.x`).
-
-### Added
-
-- Provide [more cert details](https://github.com/coreos/etcd/pull/8952/files) on TLS handshake failures.
-
-
-## [v3.1.11](https://github.com/coreos/etcd/releases/tag/v3.1.11) (2017-11-28)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.10...v3.1.11) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- [#8411](https://github.com/coreos/etcd/issues/8411),[#8806](https://github.com/coreos/etcd/pull/8806) mvcc: fix watch restore from snapshot
-- [#8009](https://github.com/coreos/etcd/issues/8009),[#8902](https://github.com/coreos/etcd/pull/8902) backport coreos/bbolt v1.3.1-coreos.5
-
-
-## [v3.2.10](https://github.com/coreos/etcd/releases/tag/v3.2.10) (2017-11-16)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.9...v3.2.10) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Replace backend key-value database `boltdb/bolt` with [`coreos/bbolt`](https://github.com/coreos/bbolt/releases) to address [backend database size issue](https://github.com/coreos/etcd/issues/8009).
-- Fix `clientv3` balancer to handle [network partitions](https://github.com/coreos/etcd/issues/8711):
- - Upgrade [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) `v1.2.1` to `v1.7.3`.
- - Upgrade [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) `v1.2` to `v1.3`.
-- Revert [discovery SRV auth `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/coreos/etcd/pull/8651) to support non-wildcard subject alternative names in the certs (see [issue #8445](https://github.com/coreos/etcd/issues/8445) for more context).
- - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `etcd.local` (**not `*.etcd.local`**) as an entry in Subject Alternative Name (SAN) field.
-
-
-## [v3.2.9](https://github.com/coreos/etcd/releases/tag/v3.2.9) (2017-10-06)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.8...v3.2.9) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed (Security)
-
-- Compile with [Go 1.8.4](https://groups.google.com/d/msg/golang-nuts/sHfMg4gZNps/a-HDgDDDAAAJ).
-- Update `golang.org/x/crypto/bcrypt` (see [golang/crypto@6c586e1](https://github.com/golang/crypto/commit/6c586e17d90a7d08bbbc4069984180dce3b04117)).
-- Fix discovery SRV bootstrapping to [authenticate `ServerName` with `*.{ROOT_DOMAIN}`](https://github.com/coreos/etcd/pull/8651), in order to support sub-domain wildcard matching (see [issue #8445](https://github.com/coreos/etcd/issues/8445) for more context).
- - For instance, `etcd --discovery-srv=etcd.local` will only authenticate peers/clients when the provided certs have root domain `*.etcd.local` as an entry in Subject Alternative Name (SAN) field.
-
-
-## [v3.2.8](https://github.com/coreos/etcd/releases/tag/v3.2.8) (2017-09-29)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.7...v3.2.8) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Fix v2 client failover to next endpoint on mutable operation.
-- Fix grpc-proxy to respect `KeysOnly` flag.
-
-
-## [v3.2.7](https://github.com/coreos/etcd/releases/tag/v3.2.7) (2017-09-01)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.6...v3.2.7) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Fix server-side auth so concurrent auth operations do not return old revision error.
-- Fix concurrency/stm Put with serializable snapshot.
- - Use the store revision from the first fetch to resolve write conflicts, instead of the modified revision.
-
-
-## [v3.2.6](https://github.com/coreos/etcd/releases/tag/v3.2.6) (2017-08-21)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.5...v3.2.6).
-
-### Fixed
-
-- Fix watch restore from snapshot.
-- Fix `etcd_debugging_mvcc_keys_total` inconsistency.
-- Fix multiple URLs for `--listen-peer-urls` flag.
-- Add `--enable-pprof` flag to etcd configuration file format.
-
-
-## [v3.2.5](https://github.com/coreos/etcd/releases/tag/v3.2.5) (2017-08-04)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.4...v3.2.5) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Changed
-
-- Use reverse lookup to match wildcard DNS SAN.
-- Return non-zero exit code on unhealthy `endpoint health`.
-
-### Fixed
-
-- Fix unreachable /metrics endpoint when `--enable-v2=false`.
-- Fix grpc-proxy to respect `PrevKv` flag.
-
-### Added
-
-- Add container registry `gcr.io/etcd-development/etcd`.
-
-
-## [v3.2.4](https://github.com/coreos/etcd/releases/tag/v3.2.4) (2017-07-19)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.3...v3.2.4) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Do not block on active client stream when stopping server
-- Fix gRPC proxy Snapshot RPC error handling
-
-
-## [v3.2.3](https://github.com/coreos/etcd/releases/tag/v3.2.3) (2017-07-14)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.2...v3.2.3) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Let clients establish unlimited streams
-
-### Added
-
-- Tag docker images with minor versions
- - e.g. `docker pull quay.io/coreos/etcd:v3.2` to fetch latest v3.2 versions
-
-
-## [v3.1.10](https://github.com/coreos/etcd/releases/tag/v3.1.10) (2017-07-14)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.9...v3.1.10) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Changed
-
-- Compile with Go 1.8.3 to fix panic on `net/http.CloseNotify`
-
-### Added
-
-- Tag docker images with minor versions.
- - e.g. `docker pull quay.io/coreos/etcd:v3.1` to fetch latest v3.1 versions.
-
-
-## [v3.2.2](https://github.com/coreos/etcd/releases/tag/v3.2.2) (2017-07-07)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.1...v3.2.2) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Improved
-
-- Rate-limit lease revoke on expiration.
-- Extend leases on promote to avoid queueing effect on lease expiration.
-
-### Fixed
-
-- Use user-provided listen address to connect to gRPC gateway:
- - `net.Listener` rewrites IPv4 0.0.0.0 to IPv6 [::], breaking IPv6-disabled hosts.
- - Only v3.2.0, v3.2.1 are affected.
-- Accept connection with matched IP SAN but no DNS match.
- - Don't check DNS entries in certs if there's a matching IP.
-- Fix 'tools/benchmark' watch command.
-
-
-## [v3.2.1](https://github.com/coreos/etcd/releases/tag/v3.2.1) (2017-06-23)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.2.0...v3.2.1) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Fixed
-
-- Fix backend database in-memory index corruption issue on restore (only 3.2.0 is affected).
-- Fix gRPC gateway Txn marshaling issue.
-- Fix backend database size debugging metrics.
-
-
-## [v3.2.0](https://github.com/coreos/etcd/releases/tag/v3.2.0) (2017-06-09)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.0...v3.2.0) and [v3.2 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_2.md) for any breaking changes.
-
-### Improved
-
-- Improve backend read concurrency.
-
-### Added
-
-- Embedded etcd (see the sketch after this list)
- - `Etcd.Peers` field is now `[]*peerListener`.
-- RPCs
- - Add Election, Lock service.
-- Native client etcdserver/api/v3client
- - client "embedded" in the server.
-- gRPC proxy
- - Proxy endpoint discovery.
- - Namespaces.
- - Coalesce lease requests.
-- v3 client
- - STM prefetching.
- - Add namespace feature.
- - Add `ErrOldCluster` with server version checking.
- - Translate `WithPrefix()` into `WithFromKey()` for empty key.
-- v3 etcdctl
- - Add `check perf` command.
- - Add `--from-key` flag to role grant-permission command.
- - `lock` command takes an optional command to execute.
-- etcd flags
- - Add `--enable-v2` flag to configure v2 backend (enabled by default).
- - Add `--auth-token` flag.
-- `etcd gateway`
- - Support DNS SRV priority.
-- Auth
- - Support Watch API.
- - JWT tokens.
-- Logging, monitoring
- - Server warns on large snapshot operations.
- - Add `etcd_debugging_server_lease_expired_total` metrics.
-- Security
- - Deny incoming peer certs with wrong IP SAN.
- - Resolve TLS `DNSNames` when SAN checking.
- - Reload TLS certificates on every client connection.
-- Release
- - Annotate acbuild with supports-systemd-notify.
- - Add `nsswitch.conf` to Docker container image.
- - Add ppc64le, arm64(experimental) builds.
- - Compile with `Go 1.8.3`.
-
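For the "Embedded etcd" item above, a minimal sketch of starting a single-member embedded server with the `embed` package; the data directory and the 60-second startup timeout are assumptions for illustration:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // illustrative data directory

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()

	select {
	case <-e.Server.ReadyNotify():
		log.Println("embedded etcd is ready")
	case <-time.After(60 * time.Second):
		e.Server.Stop() // trigger a shutdown if startup takes too long
		log.Fatal("embedded etcd took too long to start")
	}
	log.Fatal(<-e.Err()) // block until the server reports an error
}
```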
-### Changed
-
-- v3 client
- - `LeaseTimeToLive` returns a TTL=-1 response when the lease is not found.
- - `clientv3.NewFromConfigFile` is moved to `clientv3/yaml.NewConfig`.
- - The concurrency package's elections are updated to match RPC interfaces.
- - Let clients dial endpoints not in the balancer.
-- Dependencies
- - Update [`google.golang.org/grpc`](https://github.com/grpc/grpc-go/releases) to `v1.2.1`.
- - Update [`github.com/grpc-ecosystem/grpc-gateway`](https://github.com/grpc-ecosystem/grpc-gateway/releases) to `v1.2.0`.
-
-### Fixed
-
-- Allow v2 snapshot over 512MB.
-
-
-## [v3.1.9](https://github.com/coreos/etcd/releases/tag/v3.1.9) (2017-06-09)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.8...v3.1.9) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Fixed
-
-- Allow v2 snapshot over 512MB.
-
-
-## [v3.1.8](https://github.com/coreos/etcd/releases/tag/v3.1.8) (2017-05-19)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.7...v3.1.8) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-
-## [v3.1.7](https://github.com/coreos/etcd/releases/tag/v3.1.7) (2017-04-28)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.6...v3.1.7) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-
-## [v3.1.6](https://github.com/coreos/etcd/releases/tag/v3.1.6) (2017-04-19)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.5...v3.1.6) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Changed
-
-- Remove auth check in Status API.
-
-### Fixed
-
-- Fill in Auth API response header.
-
-
-## [v3.1.5](https://github.com/coreos/etcd/releases/tag/v3.1.5) (2017-03-27)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.4...v3.1.5) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Added
-
-- Add `/etc/nsswitch.conf` file to alpine-based Docker image.
-
-### Fixed
-
-- Fix raft memory leak issue.
-- Fix Windows file path issues.
-
-
-## [v3.1.4](https://github.com/coreos/etcd/releases/tag/v3.1.4) (2017-03-22)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.3...v3.1.4) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-
-## [v3.1.3](https://github.com/coreos/etcd/releases/tag/v3.1.3) (2017-03-10)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.2...v3.1.3) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Changed
-
-- Use machine default host when advertise URLs are default values (`localhost:2379,2380`) and the listen URL is `0.0.0.0`.
-
-### Fixed
-
-- Fix `etcd gateway` schema handling in DNS discovery.
-- Fix sd_notify behaviors in `gateway`, `grpc-proxy`.
-
-
-## [v3.1.2](https://github.com/coreos/etcd/releases/tag/v3.1.2) (2017-02-24)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.1...v3.1.2) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Changed
-
-- Use the IPv4 default host by default (when both IPv4 and IPv6 are available).
-
-### Fixed
-
-- Fix `etcd gateway` with multiple endpoints.
-
-
-## [v3.1.1](https://github.com/coreos/etcd/releases/tag/v3.1.1) (2017-02-17)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.1.0...v3.1.1) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Changed
-
-- Compile with `Go 1.7.5`.
-
-
-## [v2.3.8](https://github.com/coreos/etcd/releases/tag/v2.3.8) (2017-02-17)
-
-See [code changes](https://github.com/coreos/etcd/compare/v2.3.7...v2.3.8).
-
-### Changed
-
-- Compile with `Go 1.7.5`.
-
-
-## [v3.1.0](https://github.com/coreos/etcd/releases/tag/v3.1.0) (2017-01-20)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.0...v3.1.0) and [v3.1 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_1.md) for any breaking changes.
-
-### Improved
-
-- Faster linearizable reads (implements Raft read-index).
-- v3 authentication API is now stable.
-
-### Added
-
-- Automatic leadership transfer when leader steps down.
-- etcd flags
- - `--strict-reconfig-check` flag is set by default.
- - Add `--log-output` flag.
- - Add `--metrics` flag.
-- v3 client (see the sketch after this list)
- - Add `SetEndpoints` method; update endpoints at runtime.
- - Add `Sync` method; auto-update endpoints at runtime.
- - Add `Lease TimeToLive` API; fetch lease information.
- - Replace `Config.Logger` field with global logger.
- - Get API responses are sorted in ascending order by default.
-- v3 etcdctl
- - Add `lease timetolive` command.
- - Add `--print-value-only` flag to get command.
- - Add `--dest-prefix` flag to make-mirror command.
- - `get` command responses are sorted in ascending order by default.
-- `recipes` now conform to sessions defined in `clientv3/concurrency`.
-- ACI has symlinks to `/usr/local/bin/etcd*`.
-- Experimental gRPC proxy feature.
-
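For the "v3 client" additions above, a rough sketch that combines `SetEndpoints`, `Sync`, and the `Lease TimeToLive` API; the endpoints and the 10-second lease are illustrative assumptions:

```go
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Swap the client's endpoint list at runtime...
	cli.SetEndpoints("127.0.0.1:22379", "127.0.0.1:32379")
	// ...or let the client refresh endpoints from cluster membership.
	if err := cli.Sync(ctx); err != nil {
		log.Fatal(err)
	}

	// Fetch lease information through the Lease TimeToLive API.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}
	ttl, err := cli.TimeToLive(ctx, grant.ID)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("lease %x has %d seconds remaining", int64(grant.ID), ttl.TTL)
}
```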
-### Changed
-
-- Deprecated the following gRPC metrics in favor of [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus):
- - `etcd_grpc_requests_total`
- - `etcd_grpc_requests_failed_total`
- - `etcd_grpc_active_streams`
- - `etcd_grpc_unary_requests_duration_seconds`
-- etcd uses default route IP if advertise URL is not given.
-- Cluster rejects removing members if quorum will be lost.
-- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given.
- - `TLSConfig.ServerName` is ignored with user-provided certificates for backwards compatibility; to be deprecated.
- - For example, `etcd --discovery-srv=example.com` will only authenticate peers/clients when the provided certs have root domain `example.com` as an entry in Subject Alternative Name (SAN) field.
-- Discovery now has upper limit for waiting on retries.
-- Warn on binding listeners through domain names; to be deprecated.
-
-
-## [v3.0.16](https://github.com/coreos/etcd/releases/tag/v3.0.16) (2016-11-13)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.15...v3.0.16) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-
-## [v3.0.15](https://github.com/coreos/etcd/releases/tag/v3.0.15) (2016-11-11)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.14...v3.0.15) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Fixed
-
-- Fix cancel watch request with wrong range end.
-
-
-## [v3.0.14](https://github.com/coreos/etcd/releases/tag/v3.0.14) (2016-11-04)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.13...v3.0.14) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Added
-
-- v3 `etcdctl migrate` command now supports `--no-ttl` flag to discard keys on transform.
-
-
-## [v3.0.13](https://github.com/coreos/etcd/releases/tag/v3.0.13) (2016-10-24)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.12...v3.0.13) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-
-## [v3.0.12](https://github.com/coreos/etcd/releases/tag/v3.0.12) (2016-10-07)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.11...v3.0.12) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-
-## [v3.0.11](https://github.com/coreos/etcd/releases/tag/v3.0.11) (2016-10-07)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.10...v3.0.11) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Added
-
-- Server returns previous key-value (optional); see the sketch below.
- - `clientv3.WithPrevKV` option
- - v3 etcdctl `put,watch,del --prev-kv` flag
-
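A short sketch of the optional previous key-value return via `clientv3.WithPrevKV`; the endpoint, key, and value are made-up examples:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Ask the server to return the overwritten key-value pair along with the Put.
	resp, err := cli.Put(context.Background(), "foo", "bar", clientv3.WithPrevKV())
	if err != nil {
		log.Fatal(err)
	}
	if resp.PrevKv != nil {
		fmt.Printf("previous value: %s\n", resp.PrevKv.Value)
	}
}
```

On the command line, the rough equivalent is `ETCDCTL_API=3 etcdctl put foo bar --prev-kv`.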
-
-## [v3.0.10](https://github.com/coreos/etcd/releases/tag/v3.0.10) (2016-09-23)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.9...v3.0.10) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-
-## [v3.0.9](https://github.com/coreos/etcd/releases/tag/v3.0.9) (2016-09-15)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.8...v3.0.9) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Added
-
-- Warn on domain names in listen URLs (v3.2 will reject domain names).
-
-
-## [v3.0.8](https://github.com/coreos/etcd/releases/tag/v3.0.8) (2016-09-09)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.7...v3.0.8) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Changed
-
-- Allow only IP addresses in listen URLs (domain names are rejected).
-
-
-## [v3.0.7](https://github.com/coreos/etcd/releases/tag/v3.0.7) (2016-08-31)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.6...v3.0.7) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Changed
-
-- SRV records only allow A records (RFC 2052).
-
-
-## [v3.0.6](https://github.com/coreos/etcd/releases/tag/v3.0.6) (2016-08-19)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.5...v3.0.6) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-
-## [v3.0.5](https://github.com/coreos/etcd/releases/tag/v3.0.5) (2016-08-19)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.4...v3.0.5) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Changed
-
-- SRV records (e.g., infra1.example.com) must match the discovery domain (i.e., example.com) if no custom certificate authority is given.
-
-
-## [v3.0.4](https://github.com/coreos/etcd/releases/tag/v3.0.4) (2016-07-27)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.3...v3.0.4) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Changed
-
-- v2 auth can now use common name from TLS certificate when `--client-cert-auth` is enabled.
-
-### Added
-
-- v2 `etcdctl ls` command now supports `--output=json`.
-- Add /var/lib/etcd directory to etcd official Docker image.
-
-
-## [v3.0.3](https://github.com/coreos/etcd/releases/tag/v3.0.3) (2016-07-15)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.2...v3.0.3) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Changed
-
-- Revert Dockerfile to use `CMD`, instead of `ENTRYPOINT`, to support `etcdctl` run.
- - Docker commands for v3.0.2 won't work without specifying executable binary paths.
-- v3 etcdctl default endpoints are now `127.0.0.1:2379`.
-
-
-## [v3.0.2](https://github.com/coreos/etcd/releases/tag/v3.0.2) (2016-07-08)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.1...v3.0.2) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-### Changed
-
-- Dockerfile uses `ENTRYPOINT`, instead of `CMD`, to run etcd without binary path specified.
-
-
-## [v3.0.1](https://github.com/coreos/etcd/releases/tag/v3.0.1) (2016-07-01)
-
-See [code changes](https://github.com/coreos/etcd/compare/v3.0.0...v3.0.1) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
-
-
-## [v3.0.0](https://github.com/coreos/etcd/releases/tag/v3.0.0) (2016-06-30)
-
-See [code changes](https://github.com/coreos/etcd/compare/v2.3.0...v3.0.0) and [v3.0 upgrade guide](https://github.com/coreos/etcd/blob/master/Documentation/upgrades/upgrade_3_0.md) for any breaking changes.
diff --git a/vendor/github.com/coreos/etcd/CODE_OF_CONDUCT.md b/vendor/github.com/coreos/etcd/CODE_OF_CONDUCT.md
deleted file mode 100644
index c0c20dd..0000000
--- a/vendor/github.com/coreos/etcd/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,63 +0,0 @@
-## CoreOS Community Code of Conduct
-
-### Contributor Code of Conduct
-
-As contributors and maintainers of this project, and in the interest of
-fostering an open and welcoming community, we pledge to respect all people who
-contribute through reporting issues, posting feature requests, updating
-documentation, submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project a harassment-free
-experience for everyone, regardless of level of experience, gender, gender
-identity and expression, sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing others' private information, such as physical or electronic addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently applying these
-principles to every aspect of managing this project. Project maintainers who do
-not follow or enforce the Code of Conduct may be permanently removed from the
-project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting a project maintainer, Brandon Philips
-<brandon.philips@coreos.com>, and/or Meghan Schofield
-<meghan.schofield@coreos.com>.
-
-This Code of Conduct is adapted from the Contributor Covenant
-(http://contributor-covenant.org), version 1.2.0, available at
-http://contributor-covenant.org/version/1/2/0/
-
-### CoreOS Events Code of Conduct
-
-CoreOS events are working conferences intended for professional networking and
-collaboration in the CoreOS community. Attendees are expected to behave
-according to professional standards and in accordance with their employer’s
-policies on appropriate workplace behavior.
-
-While at CoreOS events or related social networking opportunities, attendees
-should not engage in discriminatory or offensive speech or actions including
-but not limited to gender, sexuality, race, age, disability, or religion.
-Speakers should be especially aware of these concerns.
-
-CoreOS does not condone any statements by speakers contrary to these standards.
-CoreOS reserves the right to deny entrance and/or eject from an event (without
-refund) any individual found to be engaging in discriminatory or offensive
-speech or actions.
-
-Please bring any concerns to the immediate attention of designated on-site
-staff, Brandon Philips <brandon.philips@coreos.com>, and/or Meghan Schofield
-<meghan.schofield@coreos.com>.
diff --git a/vendor/github.com/coreos/etcd/CONTRIBUTING.md b/vendor/github.com/coreos/etcd/CONTRIBUTING.md
deleted file mode 100644
index 31cef1f..0000000
--- a/vendor/github.com/coreos/etcd/CONTRIBUTING.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# How to contribute
-
-etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers, and other resources to help get contributions into etcd.
-
-# Email and chat
-
-- Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
-- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) IRC channel on freenode.org
-
-## Getting started
-
-- Fork the repository on GitHub
-- Read the README.md for build instructions
-
-## Reporting bugs and creating issues
-
-Reporting bugs is one of the best ways to contribute. However, a good bug report has some very specific qualities, so please read over our short document on [reporting bugs](https://github.com/coreos/etcd/blob/master/Documentation/reporting_bugs.md) before submitting a bug report. This document might contain links to known issues, another good reason to take a look there before reporting a bug.
-
-## Contribution flow
-
-This is a rough outline of what a contributor's workflow looks like:
-
-- Create a topic branch from where to base the contribution. This is usually master.
-- Make commits of logical units.
-- Make sure commit messages are in the proper format (see below).
-- Push changes in a topic branch to a personal fork of the repository.
-- Submit a pull request to coreos/etcd.
-- The PR must receive a LGTM from two maintainers found in the MAINTAINERS file.
-
-Thanks for contributing!
-
-### Code style
-
-The coding style suggested by the Golang community is used in etcd. See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details.
-
-Please follow this style to make etcd easy to review, maintain and develop.
-
-### Format of the commit message
-
-We follow a rough convention for commit messages that is designed to answer two
-questions: what changed and why. The subject line should feature the what and
-the body of the commit should describe the why.
-
-```
-scripts: add the test-cluster command
-
-this uses tmux to setup a test cluster that can easily be killed and started for debugging.
-
-Fixes #38
-```
-
-The format can be described more formally as follows:
-
-```
-<subsystem>: <what changed>
-<BLANK LINE>
-<why this change was made>
-<BLANK LINE>
-<footer>
-```
-
-The first line is the subject and should be no longer than 70 characters, the second line is always blank, and other lines should be wrapped at 80 characters. This allows the message to be easier to read on GitHub as well as in various git tools.
diff --git a/vendor/github.com/coreos/etcd/DCO b/vendor/github.com/coreos/etcd/DCO
deleted file mode 100644
index 716561d..0000000
--- a/vendor/github.com/coreos/etcd/DCO
+++ /dev/null
@@ -1,36 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/vendor/github.com/coreos/etcd/Dockerfile b/vendor/github.com/coreos/etcd/Dockerfile
deleted file mode 100644
index c653734..0000000
--- a/vendor/github.com/coreos/etcd/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM golang
-ADD . /go/src/github.com/coreos/etcd
-ADD cmd/vendor /go/src/github.com/coreos/etcd/vendor
-RUN go install github.com/coreos/etcd
-EXPOSE 2379 2380
-ENTRYPOINT ["etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-functional-tester b/vendor/github.com/coreos/etcd/Dockerfile-functional-tester
deleted file mode 100644
index cfd8086..0000000
--- a/vendor/github.com/coreos/etcd/Dockerfile-functional-tester
+++ /dev/null
@@ -1,53 +0,0 @@
-FROM ubuntu:17.10
-
-RUN rm /bin/sh && ln -s /bin/bash /bin/sh
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
-
-RUN apt-get -y update \
- && apt-get -y install \
- build-essential \
- gcc \
- apt-utils \
- pkg-config \
- software-properties-common \
- apt-transport-https \
- libssl-dev \
- sudo \
- bash \
- curl \
- wget \
- tar \
- git \
- && apt-get -y update \
- && apt-get -y upgrade \
- && apt-get -y autoremove \
- && apt-get -y autoclean
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION REPLACE_ME_GO_VERSION
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version
-
-RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
-ADD . ${GOPATH}/src/github.com/coreos/etcd
-
-RUN go get -v github.com/coreos/gofail \
- && pushd ${GOPATH}/src/github.com/coreos/etcd \
- && GO_BUILD_FLAGS="-v" ./build \
- && cp ./bin/etcd /etcd \
- && cp ./bin/etcdctl /etcdctl \
- && GO_BUILD_FLAGS="-v" FAILPOINTS=1 ./build \
- && cp ./bin/etcd /etcd-failpoints \
- && ./tools/functional-tester/build \
- && cp ./bin/etcd-agent /etcd-agent \
- && cp ./bin/etcd-tester /etcd-tester \
- && cp ./bin/etcd-runner /etcd-runner \
- && go build -v -o /benchmark ./cmd/tools/benchmark \
- && go build -v -o /etcd-test-proxy ./cmd/tools/etcd-test-proxy \
- && popd \
- && rm -rf ${GOPATH}/src/github.com/coreos/etcd
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-release b/vendor/github.com/coreos/etcd/Dockerfile-release
deleted file mode 100644
index 736445f..0000000
--- a/vendor/github.com/coreos/etcd/Dockerfile-release
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM alpine:latest
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-RUN mkdir -p /var/etcd/
-RUN mkdir -p /var/lib/etcd/
-
-# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
-# but Golang relies on /etc/nsswitch.conf to check the order of DNS resolving
-# (see https://github.com/golang/go/commit/9dee7771f561cf6aee081c0af6658cc81fac3918)
-# To fix this we just create /etc/nsswitch.conf and add the following line:
-RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-release.arm64 b/vendor/github.com/coreos/etcd/Dockerfile-release.arm64
deleted file mode 100644
index d8816e5..0000000
--- a/vendor/github.com/coreos/etcd/Dockerfile-release.arm64
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM aarch64/ubuntu:16.04
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD var/etcd /var/etcd
-ADD var/lib/etcd /var/lib/etcd
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-release.ppc64le b/vendor/github.com/coreos/etcd/Dockerfile-release.ppc64le
deleted file mode 100644
index 2fb02c4..0000000
--- a/vendor/github.com/coreos/etcd/Dockerfile-release.ppc64le
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM ppc64le/ubuntu:16.04
-
-ADD etcd /usr/local/bin/
-ADD etcdctl /usr/local/bin/
-ADD var/etcd /var/etcd
-ADD var/lib/etcd /var/lib/etcd
-
-EXPOSE 2379 2380
-
-# Define default command.
-CMD ["/usr/local/bin/etcd"]
diff --git a/vendor/github.com/coreos/etcd/Dockerfile-test b/vendor/github.com/coreos/etcd/Dockerfile-test
deleted file mode 100644
index dea3ab0..0000000
--- a/vendor/github.com/coreos/etcd/Dockerfile-test
+++ /dev/null
@@ -1,58 +0,0 @@
-FROM ubuntu:16.10
-
-RUN rm /bin/sh && ln -s /bin/bash /bin/sh
-RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
-
-RUN apt-get -y update \
- && apt-get -y install \
- build-essential \
- gcc \
- apt-utils \
- pkg-config \
- software-properties-common \
- apt-transport-https \
- libssl-dev \
- sudo \
- bash \
- curl \
- wget \
- tar \
- git \
- netcat \
- libaspell-dev \
- libhunspell-dev \
- hunspell-en-us \
- aspell-en \
- shellcheck \
- && apt-get -y update \
- && apt-get -y upgrade \
- && apt-get -y autoremove \
- && apt-get -y autoclean
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH ${GOPATH}/bin:${GOROOT}/bin:${PATH}
-ENV GO_VERSION REPLACE_ME_GO_VERSION
-ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang
-RUN rm -rf ${GOROOT} \
- && curl -s ${GO_DOWNLOAD_URL}/go${GO_VERSION}.linux-amd64.tar.gz | tar -v -C /usr/local/ -xz \
- && mkdir -p ${GOPATH}/src ${GOPATH}/bin \
- && go version
-
-RUN mkdir -p ${GOPATH}/src/github.com/coreos/etcd
-WORKDIR ${GOPATH}/src/github.com/coreos/etcd
-
-ADD ./scripts/install-marker.sh /tmp/install-marker.sh
-
-RUN go get -v -u -tags spell github.com/chzchzchz/goword \
- && go get -v -u github.com/coreos/license-bill-of-materials \
- && go get -v -u honnef.co/go/tools/cmd/gosimple \
- && go get -v -u honnef.co/go/tools/cmd/unused \
- && go get -v -u honnef.co/go/tools/cmd/staticcheck \
- && go get -v -u github.com/gyuho/gocovmerge \
- && go get -v -u github.com/gordonklaus/ineffassign \
- && go get -v -u github.com/alexkohler/nakedret \
- && /tmp/install-marker.sh amd64 \
- && rm -f /tmp/install-marker.sh \
- && curl -s https://codecov.io/bash >/codecov \
- && chmod 700 /codecov
diff --git a/vendor/github.com/coreos/etcd/MAINTAINERS b/vendor/github.com/coreos/etcd/MAINTAINERS
deleted file mode 100644
index 9983e3c..0000000
--- a/vendor/github.com/coreos/etcd/MAINTAINERS
+++ /dev/null
@@ -1,8 +0,0 @@
-Anthony Romano <anthony.romano@coreos.com> (@heyitsanthony) pkg:*
-Brandon Philips <brandon.philips@coreos.com> (@philips) pkg:*
-Fanmin Shi <fanmin.shi@coreos.com> (@fanminshi) pkg:*
-Gyu-Ho Lee <gyu_ho.lee@coreos.com> (@gyuho) pkg:*
-Xiang Li <xiang.li@coreos.com> (@xiang90) pkg:*
-
-Ben Darnell <ben@cockroachlabs.com> (@bdarnell) pkg:github.com/coreos/etcd/raft
-Hitoshi Mitake <mitake.hitoshi@lab.ntt.co.jp> (@mitake) pkg:github.com/coreos/etcd/auth
diff --git a/vendor/github.com/coreos/etcd/Makefile b/vendor/github.com/coreos/etcd/Makefile
deleted file mode 100644
index a8eceef..0000000
--- a/vendor/github.com/coreos/etcd/Makefile
+++ /dev/null
@@ -1,517 +0,0 @@
-# run from repository root
-
-
-
-# Example:
-# make build
-# make clean
-# make docker-clean
-# make docker-start
-# make docker-kill
-# make docker-remove
-
-.PHONY: build
-build:
- GO_BUILD_FLAGS="-v" ./build
- ./bin/etcd --version
- ETCDCTL_API=3 ./bin/etcdctl version
-
-clean:
- rm -f ./codecov
- rm -rf ./agent-*
- rm -rf ./covdir
- rm -f ./*.coverprofile
- rm -f ./*.log
- rm -f ./bin/Dockerfile-release
- rm -rf ./bin/*.etcd
- rm -rf ./default.etcd
- rm -rf ./tests/e2e/default.etcd
- rm -rf ./gopath
- rm -rf ./gopath.proto
- rm -rf ./release
- rm -f ./snapshot/localhost:*
- rm -f ./integration/127.0.0.1:* ./integration/localhost:*
- rm -f ./clientv3/integration/127.0.0.1:* ./clientv3/integration/localhost:*
- rm -f ./clientv3/ordering/127.0.0.1:* ./clientv3/ordering/localhost:*
-
-docker-clean:
- docker images
- docker image prune --force
-
-docker-start:
- service docker restart
-
-docker-kill:
- docker kill `docker ps -q` || true
-
-docker-remove:
- docker rm --force `docker ps -a -q` || true
- docker rmi --force `docker images -q` || true
-
-
-
-GO_VERSION ?= 1.10.3
-ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound")
-
-TEST_SUFFIX = $(shell date +%s | base64 | head -c 15)
-TEST_OPTS ?= PASSES='unit'
-
-TMP_DIR_MOUNT_FLAG = --mount type=tmpfs,destination=/tmp
-ifdef HOST_TMP_DIR
- TMP_DIR_MOUNT_FLAG = --mount type=bind,source=$(HOST_TMP_DIR),destination=/tmp
-endif
-
-
-
-# Example:
-# GO_VERSION=1.8.7 make build-docker-test
-# make build-docker-test
-#
-# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
-# GO_VERSION=1.8.7 make push-docker-test
-# make push-docker-test
-#
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-test
-
-build-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/Dockerfile
- docker build \
- --tag gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- --file ./tests/Dockerfile .
- @mv ./tests/Dockerfile.bak ./tests/Dockerfile
-
-push-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- gcloud docker -- push gcr.io/etcd-development/etcd-test:go$(GO_VERSION)
-
-pull-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-test:go$(GO_VERSION)
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make compile-setup-gopath-with-docker-test
-
-compile-with-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker run \
- --rm \
- --mount type=bind,source=`pwd`,destination=/go/src/github.com/coreos/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version"
-
-compile-setup-gopath-with-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker run \
- --rm \
- --mount type=bind,source=`pwd`,destination=/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && ETCD_SETUP_GOPATH=1 GO_BUILD_FLAGS=-v ./build && ./bin/etcd --version && rm -rf ./gopath"
-
-
-
-# Example:
-#
-# Local machine:
-# TEST_OPTS="PASSES='fmt'" make test
-# TEST_OPTS="PASSES='fmt bom dep build unit'" make test
-# TEST_OPTS="PASSES='build unit release integration_e2e functional'" make test
-# TEST_OPTS="PASSES='build grpcproxy'" make test
-#
-# Example (test with docker):
-# make pull-docker-test
-# TEST_OPTS="PASSES='fmt'" make docker-test
-# TEST_OPTS="VERBOSE=2 PASSES='unit'" make docker-test
-#
-# Travis CI (test with docker):
-# TEST_OPTS="PASSES='fmt bom dep build unit'" make docker-test
-#
-# Semaphore CI (test with docker):
-# TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test
-# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test
-# TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" make docker-test
-#
-# grpc-proxy tests (test with docker):
-# TEST_OPTS="PASSES='build grpcproxy'" make docker-test
-# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test
-
-.PHONY: test
-test:
- $(info TEST_OPTS: $(TEST_OPTS))
- $(info log-file: test-$(TEST_SUFFIX).log)
- $(TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log
- ! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log
-
-docker-test:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- $(info TEST_OPTS: $(TEST_OPTS))
- $(info log-file: test-$(TEST_SUFFIX).log)
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`,destination=/go/src/github.com/coreos/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "$(TEST_OPTS) ./test 2>&1 | tee test-$(TEST_SUFFIX).log"
- ! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log
-
-docker-test-coverage:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- $(info log-file: docker-test-coverage-$(TEST_SUFFIX).log)
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`,destination=/go/src/github.com/coreos/etcd \
- gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \
- /bin/bash -c "COVERDIR=covdir PASSES='build build_cov cov' ./test 2>&1 | tee docker-test-coverage-$(TEST_SUFFIX).log && /codecov -t 6040de41-c073-4d6f-bbf8-d89256ef31e1"
- ! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 docker-test-coverage-$(TEST_SUFFIX).log
-
-
-
-# Example:
-# make compile-with-docker-test
-# ETCD_VERSION=v3-test make build-docker-release-master
-# ETCD_VERSION=v3-test make push-docker-release-master
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-
-build-docker-release-master:
- $(info ETCD_VERSION: $(ETCD_VERSION))
- cp ./Dockerfile-release ./bin/Dockerfile-release
- docker build \
- --tag gcr.io/etcd-development/etcd:$(ETCD_VERSION) \
- --file ./bin/Dockerfile-release \
- ./bin
- rm -f ./bin/Dockerfile-release
-
- docker run \
- --rm \
- gcr.io/etcd-development/etcd:$(ETCD_VERSION) \
- /bin/sh -c "/usr/local/bin/etcd --version && ETCDCTL_API=3 /usr/local/bin/etcdctl version"
-
-push-docker-release-master:
- $(info ETCD_VERSION: $(ETCD_VERSION))
- gcloud docker -- push gcr.io/etcd-development/etcd:$(ETCD_VERSION)
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make build-docker-static-ip-test
-#
-# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
-# make push-docker-static-ip-test
-#
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-static-ip-test
-#
-# make docker-static-ip-test-certs-run
-# make docker-static-ip-test-certs-metrics-proxy-run
-
-build-docker-static-ip-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-static-ip/Dockerfile
- docker build \
- --tag gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
- --file ./tests/docker-static-ip/Dockerfile \
- ./tests/docker-static-ip
- @mv ./tests/docker-static-ip/Dockerfile.bak ./tests/docker-static-ip/Dockerfile
-
-push-docker-static-ip-test:
- $(info GO_VERSION: $(GO_VERSION))
- gcloud docker -- push gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION)
-
-pull-docker-static-ip-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION)
-
-docker-static-ip-test-certs-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-static-ip/certs,destination=/certs \
- gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
-
-docker-static-ip-test-certs-metrics-proxy-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-static-ip/certs-metrics-proxy,destination=/certs-metrics-proxy \
- gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-metrics-proxy/run.sh && rm -rf m*.etcd"
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make build-docker-dns-test
-#
-# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
-# make push-docker-dns-test
-#
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-dns-test
-#
-# make docker-dns-test-insecure-run
-# make docker-dns-test-certs-run
-# make docker-dns-test-certs-gateway-run
-# make docker-dns-test-certs-wildcard-run
-# make docker-dns-test-certs-common-name-auth-run
-# make docker-dns-test-certs-common-name-multi-run
-
-build-docker-dns-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-dns/Dockerfile
- docker build \
- --tag gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- --file ./tests/docker-dns/Dockerfile \
- ./tests/docker-dns
- @mv ./tests/docker-dns/Dockerfile.bak ./tests/docker-dns/Dockerfile
-
- docker run \
- --rm \
- --dns 127.0.0.1 \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local"
-
-push-docker-dns-test:
- $(info GO_VERSION: $(GO_VERSION))
- gcloud docker -- push gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION)
-
-pull-docker-dns-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION)
-
-docker-dns-test-insecure-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/insecure,destination=/insecure \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /insecure/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs,destination=/certs \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-gateway-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-gateway,destination=/certs-gateway \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-wildcard-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-wildcard,destination=/certs-wildcard \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-common-name-auth-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-auth,destination=/certs-common-name-auth \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-common-name-auth/run.sh && rm -rf m*.etcd"
-
-docker-dns-test-certs-common-name-multi-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-multi,destination=/certs-common-name-multi \
- gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-common-name-multi/run.sh && rm -rf m*.etcd"
-
-
-
-# Example:
-# make build-docker-test
-# make compile-with-docker-test
-# make build-docker-dns-srv-test
-# gcloud docker -- login -u _json_key -p "$(cat /etc/gcp-key-etcd-development.json)" https://gcr.io
-# make push-docker-dns-srv-test
-# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com
-# make pull-docker-dns-srv-test
-# make docker-dns-srv-test-certs-run
-# make docker-dns-srv-test-certs-gateway-run
-# make docker-dns-srv-test-certs-wildcard-run
-
-build-docker-dns-srv-test:
- $(info GO_VERSION: $(GO_VERSION))
- @sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-dns-srv/Dockerfile
- docker build \
- --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- --file ./tests/docker-dns-srv/Dockerfile \
- ./tests/docker-dns-srv
- @mv ./tests/docker-dns-srv/Dockerfile.bak ./tests/docker-dns-srv/Dockerfile
-
- docker run \
- --rm \
- --dns 127.0.0.1 \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local"
-
-push-docker-dns-srv-test:
- $(info GO_VERSION: $(GO_VERSION))
- gcloud docker -- push gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION)
-
-pull-docker-dns-srv-test:
- $(info GO_VERSION: $(GO_VERSION))
- docker pull gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION)
-
-docker-dns-srv-test-certs-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs,destination=/certs \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd"
-
-docker-dns-srv-test-certs-gateway-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-gateway,destination=/certs-gateway \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd"
-
-docker-dns-srv-test-certs-wildcard-run:
- $(info GO_VERSION: $(GO_VERSION))
- $(info HOST_TMP_DIR: $(HOST_TMP_DIR))
- $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG))
- docker run \
- --rm \
- --tty \
- --dns 127.0.0.1 \
- $(TMP_DIR_MOUNT_FLAG) \
- --mount type=bind,source=`pwd`/bin,destination=/etcd \
- --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-wildcard,destination=/certs-wildcard \
- gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \
- /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd"
-
-
-
-# Example:
-# make build-functional
-# make build-docker-functional
-# make push-docker-functional
-# make pull-docker-functional
-
-build-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- ./functional/build
- ./bin/etcd-agent -help || true && \
- ./bin/etcd-proxy -help || true && \
- ./bin/etcd-runner --help || true && \
- ./bin/etcd-tester -help || true
-
-build-docker-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- @sed -i.bak 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./functional/Dockerfile
- docker build \
- --tag gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \
- --file ./functional/Dockerfile \
- .
- @mv ./functional/Dockerfile.bak ./functional/Dockerfile
-
- docker run \
- --rm \
- gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \
- /bin/bash -c "./bin/etcd --version && \
- ./bin/etcd-failpoints --version && \
- ETCDCTL_API=3 ./bin/etcdctl version && \
- ./bin/etcd-agent -help || true && \
- ./bin/etcd-proxy -help || true && \
- ./bin/etcd-runner --help || true && \
- ./bin/etcd-tester -help || true && \
- ./bin/benchmark --help || true"
-
-push-docker-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- gcloud docker -- push gcr.io/etcd-development/etcd-functional:go$(GO_VERSION)
-
-pull-docker-functional:
- $(info GO_VERSION: $(GO_VERSION))
- $(info ETCD_VERSION: $(ETCD_VERSION))
- docker pull gcr.io/etcd-development/etcd-functional:go$(GO_VERSION)
diff --git a/vendor/github.com/coreos/etcd/Procfile b/vendor/github.com/coreos/etcd/Procfile
deleted file mode 100644
index 868967c..0000000
--- a/vendor/github.com/coreos/etcd/Procfile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Use goreman to run; install it with `go get github.com/mattn/goreman`
-etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-#proxy: bin/etcd grpc-proxy start --endpoints=127.0.0.1:2379,127.0.0.1:22379,127.0.0.1:32379 --listen-addr=127.0.0.1:23790 --advertise-client-url=127.0.0.1:23790 --enable-pprof
diff --git a/vendor/github.com/coreos/etcd/Procfile.v2 b/vendor/github.com/coreos/etcd/Procfile.v2
deleted file mode 100644
index 41dd49f..0000000
--- a/vendor/github.com/coreos/etcd/Procfile.v2
+++ /dev/null
@@ -1,6 +0,0 @@
-# Use goreman to run; install it with `go get github.com/mattn/goreman`
-etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:2379 --advertise-client-urls http://127.0.0.1:2379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-# in future, use proxy to listen on 2379
-#proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2378 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
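Note (illustrative, not part of this change): both Procfiles launch the same three-member development cluster, and each member receives the full peer map through `--initial-cluster` in `name=peer-url,...` form. The sketch below shows how such a flag value decomposes; the parsing helper is written here for illustration and is not taken from etcd.

```go
package main

import (
	"fmt"
	"strings"
)

// parseInitialCluster splits an --initial-cluster value such as
// "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380"
// into a member-name -> peer-URL map.
func parseInitialCluster(s string) (map[string]string, error) {
	members := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("malformed member entry %q", pair)
		}
		members[kv[0]] = kv[1]
	}
	return members, nil
}

func main() {
	m, err := parseInitialCluster("infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380")
	if err != nil {
		panic(err)
	}
	for name, url := range m {
		fmt.Printf("%s -> %s\n", name, url)
	}
}
```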
diff --git a/vendor/github.com/coreos/etcd/README.md b/vendor/github.com/coreos/etcd/README.md
deleted file mode 100644
index 2b55901..0000000
--- a/vendor/github.com/coreos/etcd/README.md
+++ /dev/null
@@ -1,161 +0,0 @@
-# etcd
-
-[Go Report Card](https://goreportcard.com/report/github.com/coreos/etcd)
-[Codecov](https://codecov.io/gh/coreos/etcd)
-[Travis CI](https://travis-ci.org/coreos/etcd)
-[Semaphore CI](https://semaphoreci.com/coreos/etcd)
-[GoDoc](https://godoc.org/github.com/coreos/etcd)
-[Releases](https://github.com/coreos/etcd/releases)
-[License](https://github.com/coreos/etcd/blob/master/LICENSE)
-
-**Note**: The `master` branch may be in an *unstable or even broken state* during development. Please use [releases][github-release] instead of the `master` branch in order to get stable binaries.
-
-*the etcd v2 [documentation](Documentation/v2/README.md) has moved*
-
-
-
-etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being:
-
-* *Simple*: well-defined, user-facing API (gRPC)
-* *Secure*: automatic TLS with optional client cert authentication
-* *Fast*: benchmarked 10,000 writes/sec
-* *Reliable*: properly distributed using Raft
-
-etcd is written in Go and uses the [Raft][raft] consensus algorithm to manage a highly-available replicated log.
-
-etcd is used [in production by many companies](./Documentation/production-users.md), and the development team stands behind it in critical deployment scenarios, where etcd is frequently teamed with applications such as [Kubernetes][k8s], [fleet][fleet], [locksmith][locksmith], [vulcand][vulcand], [Doorman][doorman], and many others. Reliability is further ensured by rigorous [testing][etcd-tests].
-
-See [etcdctl][etcdctl] for a simple command line client.
-
-[raft]: https://raft.github.io/
-[k8s]: http://kubernetes.io/
-[doorman]: https://github.com/youtube/doorman
-[fleet]: https://github.com/coreos/fleet
-[locksmith]: https://github.com/coreos/locksmith
-[vulcand]: https://github.com/vulcand/vulcand
-[etcdctl]: https://github.com/coreos/etcd/tree/master/etcdctl
-[etcd-tests]: http://dash.etcd.io
-
-## Community meetings
-
-etcd contributors and maintainers have bi-weekly meetings at 11:00 AM (USA Pacific) on Tuesdays. There is an [iCalendar][rfc5545] format for the meetings [here](meeting.ics). Anyone is welcome to join via [Zoom][zoom] or audio-only: +1 669 900 6833. An initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas.
-
-[rfc5545]: https://tools.ietf.org/html/rfc5545
-[zoom]: https://coreos.zoom.us/j/854793406
-[shared-meeting-notes]: https://docs.google.com/document/d/1DbVXOHvd9scFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit#
-
-## Getting started
-
-### Getting etcd
-
-The easiest way to get etcd is to use one of the pre-built release binaries which are available for OSX, Linux, Windows, [rkt][rkt], and Docker. Instructions for using these binaries are on the [GitHub releases page][github-release].
-
-For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `master` branch. This first needs [*Go*](https://golang.org/) installed (version 1.9+ is required). All development occurs on `master`, including new features and bug fixes. Bug fixes are first targeted at `master` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
-
-[rkt]: https://github.com/rkt/rkt/releases/
-[github-release]: https://github.com/coreos/etcd/releases/
-[branch-management]: ./Documentation/branch_management.md
-[dl-build]: ./Documentation/dl_build.md#build-the-latest-version
-
-### Running etcd
-
-First start a single-member cluster of etcd.
-
-If etcd is installed using the [pre-built release binaries][github-release], run it from the installation location as below:
-
-```sh
-/tmp/etcd-download-test/etcd
-```
-The etcd command can then be run simply as `etcd` once it is moved onto the system path, as below:
-
-```sh
-mv /tmp/etcd-download-test/etcd /usr/local/bin/
-
-etcd
-```
-
-If etcd is [built from the master branch][dl-build], run it as below:
-
-```sh
-./bin/etcd
-```
-
-This will bring up etcd listening on port 2379 for client communication and on port 2380 for server-to-server communication.
-
-Next, let's set a single key, and then retrieve it:
-
-```
-ETCDCTL_API=3 etcdctl put mykey "this is awesome"
-ETCDCTL_API=3 etcdctl get mykey
-```
-
-That's it! etcd is now running and serving client requests. For more:
-
-- [Animated quick demo][demo-gif]
-- [Interactive etcd playground][etcd-play]
-
-[demo-gif]: ./Documentation/demo.md
-[etcd-play]: http://play.etcd.io/
-
-### etcd TCP ports
-
-The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication.
-
-[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt
-
-### Running a local etcd cluster
-
-First install [goreman](https://github.com/mattn/goreman), which manages Procfile-based applications.
-
-Our [Procfile script](./Procfile) will set up a local example cluster. Start it with:
-
-```sh
-goreman start
-```
-
-This will bring up 3 etcd members `infra1`, `infra2` and `infra3` and etcd `grpc-proxy`, which runs locally and composes a cluster.
-
-Every cluster member and proxy accepts key value reads and key value writes.
-
-### Running etcd on Kubernetes
-
-To run an etcd cluster on Kubernetes, try [etcd operator](https://github.com/coreos/etcd-operator).
-
-### Next steps
-
-Now it's time to dig into the full etcd API and other guides.
-
-- Read the full [documentation][fulldoc].
-- Explore the full gRPC [API][api].
-- Set up a [multi-machine cluster][clustering].
-- Learn the [config format, env variables and flags][configuration].
-- Find [language bindings and tools][integrations].
-- Use TLS to [secure an etcd cluster][security].
-- [Tune etcd][tuning].
-
-[fulldoc]: ./Documentation/docs.md
-[api]: ./Documentation/dev-guide/api_reference_v3.md
-[clustering]: ./Documentation/op-guide/clustering.md
-[configuration]: ./Documentation/op-guide/configuration.md
-[integrations]: ./Documentation/integrations.md
-[security]: ./Documentation/op-guide/security.md
-[tuning]: ./Documentation/tuning.md
-
-## Contact
-
-- Mailing list: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev)
-- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) on freenode.org
-- Planning/Roadmap: [milestones](https://github.com/coreos/etcd/milestones), [roadmap](./ROADMAP.md)
-- Bugs: [issues](https://github.com/coreos/etcd/issues)
-
-## Contributing
-
-See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the contribution workflow.
-
-## Reporting bugs
-
-See [reporting bugs](Documentation/reporting_bugs.md) for details about reporting any issues.
-
-### License
-
-etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
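Note (illustrative, not part of this change): the removed README demonstrates `put`/`get` through etcdctl; the same round trip can be done from Go with the `clientv3` package. Since `clientv3` is one of the packages pruned from the vendor tree by this change, the import below is an assumption and would have to be satisfied from GOPATH or an upstream checkout.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3" // assumption: no longer in the trimmed vendor directory
)

func main() {
	// Connect to a local single-member etcd listening on the default client port.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Equivalent of: ETCDCTL_API=3 etcdctl put mykey "this is awesome"
	if _, err := cli.Put(ctx, "mykey", "this is awesome"); err != nil {
		panic(err)
	}

	// Equivalent of: ETCDCTL_API=3 etcdctl get mykey
	resp, err := cli.Get(ctx, "mykey")
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value)
	}
}
```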
diff --git a/vendor/github.com/coreos/etcd/ROADMAP.md b/vendor/github.com/coreos/etcd/ROADMAP.md
deleted file mode 100644
index f7ae890..0000000
--- a/vendor/github.com/coreos/etcd/ROADMAP.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# etcd roadmap
-
-**work in progress**
-
-This document defines a high level roadmap for etcd development.
-
-The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans.
-
-etcd 3.2 is our current stable branch. The roadmap below outlines new features that will be added to etcd; while subject to change, it defines what the future stable release will look like.
-
-### etcd 3.2 (2017-May)
-- Stable scalable proxy
-- Proxy-as-client interface passthrough
-- Lock service
-- Namespacing proxy
-- TLS Common Name and JWT token based authentication
-- Read-modify-write V3 Put
-- Improved watch performance
-- Support non-blocking concurrent read
-
-### etcd 3.3 (?)
-- TBD
-
diff --git a/vendor/github.com/coreos/etcd/V2Procfile b/vendor/github.com/coreos/etcd/V2Procfile
deleted file mode 100644
index 925910f..0000000
--- a/vendor/github.com/coreos/etcd/V2Procfile
+++ /dev/null
@@ -1,5 +0,0 @@
-# Use goreman to run `go get github.com/mattn/goreman`
-etcd1: bin/etcd --name infra1 --listen-client-urls http://127.0.0.1:12379 --advertise-client-urls http://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd2: bin/etcd --name infra2 --listen-client-urls http://127.0.0.1:22379 --advertise-client-urls http://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-etcd3: bin/etcd --name infra3 --listen-client-urls http://127.0.0.1:32379 --advertise-client-urls http://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --enable-pprof
-proxy: bin/etcd --name infra-proxy1 --proxy=on --listen-client-urls http://127.0.0.1:2379 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --enable-pprof
diff --git a/vendor/github.com/coreos/etcd/alarm/alarms.go b/vendor/github.com/coreos/etcd/alarm/alarms.go
deleted file mode 100644
index 4f0ebe9..0000000
--- a/vendor/github.com/coreos/etcd/alarm/alarms.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package alarm manages health status alarms in etcd.
-package alarm
-
-import (
- "sync"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- alarmBucketName = []byte("alarm")
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "alarm")
-)
-
-type BackendGetter interface {
- Backend() backend.Backend
-}
-
-type alarmSet map[types.ID]*pb.AlarmMember
-
-// AlarmStore persists alarms to the backend.
-type AlarmStore struct {
- mu sync.Mutex
- types map[pb.AlarmType]alarmSet
-
- bg BackendGetter
-}
-
-func NewAlarmStore(bg BackendGetter) (*AlarmStore, error) {
- ret := &AlarmStore{types: make(map[pb.AlarmType]alarmSet), bg: bg}
- err := ret.restore()
- return ret, err
-}
-
-func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
- a.mu.Lock()
- defer a.mu.Unlock()
-
- newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at}
- if m := a.addToMap(newAlarm); m != newAlarm {
- return m
- }
-
- v, err := newAlarm.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal alarm member")
- }
-
- b := a.bg.Backend()
- b.BatchTx().Lock()
- b.BatchTx().UnsafePut(alarmBucketName, v, nil)
- b.BatchTx().Unlock()
-
- return newAlarm
-}
-
-func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
- a.mu.Lock()
- defer a.mu.Unlock()
-
- t := a.types[at]
- if t == nil {
- t = make(alarmSet)
- a.types[at] = t
- }
- m := t[id]
- if m == nil {
- return nil
- }
-
- delete(t, id)
-
- v, err := m.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal alarm member")
- }
-
- b := a.bg.Backend()
- b.BatchTx().Lock()
- b.BatchTx().UnsafeDelete(alarmBucketName, v)
- b.BatchTx().Unlock()
-
- return m
-}
-
-func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
- a.mu.Lock()
- defer a.mu.Unlock()
- if at == pb.AlarmType_NONE {
- for _, t := range a.types {
- for _, m := range t {
- ret = append(ret, m)
- }
- }
- return ret
- }
- for _, m := range a.types[at] {
- ret = append(ret, m)
- }
- return ret
-}
-
-func (a *AlarmStore) restore() error {
- b := a.bg.Backend()
- tx := b.BatchTx()
-
- tx.Lock()
- tx.UnsafeCreateBucket(alarmBucketName)
- err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error {
- var m pb.AlarmMember
- if err := m.Unmarshal(k); err != nil {
- return err
- }
- a.addToMap(&m)
- return nil
- })
- tx.Unlock()
-
- b.ForceCommit()
- return err
-}
-
-func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember {
- t := a.types[newAlarm.Alarm]
- if t == nil {
- t = make(alarmSet)
- a.types[newAlarm.Alarm] = t
- }
- m := t[types.ID(newAlarm.MemberID)]
- if m != nil {
- return m
- }
- t[types.ID(newAlarm.MemberID)] = newAlarm
- return newAlarm
-}
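Note (illustrative, not part of this change): the removed AlarmStore indexes alarms by (alarm type, member ID) and de-duplicates activations in addToMap. The toy sketch below mirrors only that bookkeeping, with no bolt backend and with simplified types; all names here are invented for the example.

```go
package main

import "fmt"

// alarmKey stands in for the (AlarmType, MemberID) pair the real store indexes on.
type alarmKey struct {
	alarm  string // e.g. "NOSPACE"
	member uint64
}

// memAlarms is an in-memory stand-in for AlarmStore.types, without persistence.
type memAlarms map[alarmKey]bool

// activate reports false when the alarm is already raised for that member,
// mirroring how AlarmStore.Activate returns the existing entry instead of a new one.
func (m memAlarms) activate(alarm string, member uint64) bool {
	k := alarmKey{alarm, member}
	if m[k] {
		return false
	}
	m[k] = true
	return true
}

func (m memAlarms) deactivate(alarm string, member uint64) {
	delete(m, alarmKey{alarm, member})
}

func main() {
	a := memAlarms{}
	fmt.Println(a.activate("NOSPACE", 1)) // true: newly raised
	fmt.Println(a.activate("NOSPACE", 1)) // false: already raised for member 1
	a.deactivate("NOSPACE", 1)
	fmt.Println(a.activate("NOSPACE", 1)) // true again after deactivation
}
```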
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
new file mode 100644
index 0000000..1a940c3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
@@ -0,0 +1,807 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+/*
+ Package authpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ auth.proto
+
+ It has these top-level messages:
+ User
+ Permission
+ Role
+*/
+package authpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Permission_Type int32
+
+const (
+ READ Permission_Type = 0
+ WRITE Permission_Type = 1
+ READWRITE Permission_Type = 2
+)
+
+var Permission_Type_name = map[int32]string{
+ 0: "READ",
+ 1: "WRITE",
+ 2: "READWRITE",
+}
+var Permission_Type_value = map[string]int32{
+ "READ": 0,
+ "WRITE": 1,
+ "READWRITE": 2,
+}
+
+func (x Permission_Type) String() string {
+ return proto.EnumName(Permission_Type_name, int32(x))
+}
+func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
+
+// User is a single entry in the bucket authUsers
+type User struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
+
+// Permission is a single entity
+type Permission struct {
+ PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *Permission) Reset() { *m = Permission{} }
+func (m *Permission) String() string { return proto.CompactTextString(m) }
+func (*Permission) ProtoMessage() {}
+func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
+
+// Role is a single entry in the bucket authRoles
+type Role struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
+}
+
+func (m *Role) Reset() { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage() {}
+func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
+
+func init() {
+ proto.RegisterType((*User)(nil), "authpb.User")
+ proto.RegisterType((*Permission)(nil), "authpb.Permission")
+ proto.RegisterType((*Role)(nil), "authpb.Role")
+ proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
+}
+func (m *User) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *Permission) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.PermType != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ return i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.KeyPermission) > 0 {
+ for _, msg := range m.KeyPermission {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *User) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Permission) Size() (n int) {
+ var l int
+ _ = l
+ if m.PermType != 0 {
+ n += 1 + sovAuth(uint64(m.PermType))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ return n
+}
+
+func (m *Role) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if len(m.KeyPermission) > 0 {
+ for _, e := range m.KeyPermission {
+ l = e.Size()
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovAuth(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozAuth(x uint64) (n int) {
+ return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: User: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
+ if m.Password == nil {
+ m.Password = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Permission) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Permission: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
+ }
+ m.PermType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PermType |= (Permission_Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Role: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KeyPermission = append(m.KeyPermission, &Permission{})
+ if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipAuth(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthAuth
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipAuth(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
+
+var fileDescriptorAuth = []byte{
+ // 288 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
+ 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
+ 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
+ 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
+ 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
+ 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
+ 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
+ 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
+ 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
+ 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
+ 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
+ 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
+ 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
+ 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
+ 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
+ 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
+ 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
+ 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
+}
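Note (illustrative, not part of this change): the newly vendored authpb package keeps only the generated message types (User, Permission, Role) and their wire-format code. A quick round trip through Marshal/Unmarshal looks like the sketch below, assuming the vendored import path resolves after `dep ensure`.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

func main() {
	u := &authpb.User{
		Name:     []byte("alice"),
		Password: []byte("$2a$10$..."), // the auth store persists a bcrypt hash, never plaintext
		Roles:    []string{"root"},
	}

	// Encode to the protobuf wire format.
	data, err := u.Marshal()
	if err != nil {
		panic(err)
	}

	// Decode it back into a fresh message.
	var decoded authpb.User
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("name=%s roles=%v (%d bytes on the wire)\n", decoded.Name, decoded.Roles, len(data))
}
```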
diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/github.com/coreos/etcd/auth/doc.go
deleted file mode 100644
index 72741a1..0000000
--- a/vendor/github.com/coreos/etcd/auth/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package auth provides client role authentication for accessing keys in etcd.
-package auth
diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go
deleted file mode 100644
index 99b2d6b..0000000
--- a/vendor/github.com/coreos/etcd/auth/jwt.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
- "crypto/rsa"
- "io/ioutil"
-
- jwt "github.com/dgrijalva/jwt-go"
-)
-
-type tokenJWT struct {
- signMethod string
- signKey *rsa.PrivateKey
- verifyKey *rsa.PublicKey
-}
-
-func (t *tokenJWT) enable() {}
-func (t *tokenJWT) disable() {}
-func (t *tokenJWT) invalidateUser(string) {}
-func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
-
-func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
- // rev isn't used in JWT, it is only used in simple token
- var (
- username string
- revision uint64
- )
-
- parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
- return t.verifyKey, nil
- })
-
- switch err.(type) {
- case nil:
- if !parsed.Valid {
- plog.Warningf("invalid jwt token: %s", token)
- return nil, false
- }
-
- claims := parsed.Claims.(jwt.MapClaims)
-
- username = claims["username"].(string)
- revision = uint64(claims["revision"].(float64))
- default:
- plog.Warningf("failed to parse jwt token: %s", err)
- return nil, false
- }
-
- return &AuthInfo{Username: username, Revision: revision}, true
-}
-
-func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
-	// Future work: letting a jwt token include permission information would be useful
-	// for permission checking on the proxy side.
- tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
- jwt.MapClaims{
- "username": username,
- "revision": revision,
- })
-
- token, err := tk.SignedString(t.signKey)
- if err != nil {
- plog.Debugf("failed to sign jwt token: %s", err)
- return "", err
- }
-
- plog.Debugf("jwt token: %s", token)
-
- return token, err
-}
-
-func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
- for k, v := range opts {
- switch k {
- case "sign-method":
- jwtSignMethod = v
- case "pub-key":
- jwtPubKeyPath = v
- case "priv-key":
- jwtPrivKeyPath = v
- default:
- plog.Errorf("unknown token specific option: %s", k)
- return "", "", "", ErrInvalidAuthOpts
- }
- }
- if len(jwtSignMethod) == 0 {
- return "", "", "", ErrInvalidAuthOpts
- }
- return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
-}
-
-func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
- jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
- if err != nil {
- return nil, ErrInvalidAuthOpts
- }
-
- t := &tokenJWT{}
-
- t.signMethod = jwtSignMethod
-
- verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
- if err != nil {
- plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
- return nil, err
- }
- t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
- if err != nil {
- plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
- return nil, err
- }
-
- signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
- if err != nil {
- plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
- return nil, err
- }
- t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
- if err != nil {
- plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
- return nil, err
- }
-
- return t, nil
-}
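Note (illustrative, not part of this change): the removed tokenJWT signs MapClaims containing the username and the auth store revision with an RSA key pair loaded from PEM files, and verifies incoming tokens with the public key. The sketch below reproduces that claim shape with github.com/dgrijalva/jwt-go; generating the RSA key in-process is an assumption made only to keep the example self-contained.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// The real provider reads sign-method, pub-key and priv-key options; here we
	// just generate a throwaway RSA key so the sketch runs on its own.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Sign the same claims tokenJWT.assign uses: username plus auth store revision.
	tok := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"username": "alice",
		"revision": 7,
	})
	signed, err := tok.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Verify with the public key, as tokenJWT.info does.
	parsed, err := jwt.Parse(signed, func(*jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil
	})
	if err != nil || !parsed.Valid {
		panic(fmt.Errorf("verification failed: %v", err))
	}
	claims := parsed.Claims.(jwt.MapClaims)
	fmt.Println(claims["username"], uint64(claims["revision"].(float64)))
}
```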
diff --git a/vendor/github.com/coreos/etcd/auth/nop.go b/vendor/github.com/coreos/etcd/auth/nop.go
deleted file mode 100644
index d437874..0000000
--- a/vendor/github.com/coreos/etcd/auth/nop.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
-)
-
-type tokenNop struct{}
-
-func (t *tokenNop) enable() {}
-func (t *tokenNop) disable() {}
-func (t *tokenNop) invalidateUser(string) {}
-func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
-func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
- return nil, false
-}
-func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
- return "", ErrAuthFailed
-}
-func newTokenProviderNop() (*tokenNop, error) {
- return &tokenNop{}, nil
-}
diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go
deleted file mode 100644
index 691b65b..0000000
--- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "github.com/coreos/etcd/auth/authpb"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/adt"
-)
-
-func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
- user := getUser(tx, userName)
- if user == nil {
- plog.Errorf("invalid user name %s", userName)
- return nil
- }
-
- readPerms := &adt.IntervalTree{}
- writePerms := &adt.IntervalTree{}
-
- for _, roleName := range user.Roles {
- role := getRole(tx, roleName)
- if role == nil {
- continue
- }
-
- for _, perm := range role.KeyPermission {
- var ivl adt.Interval
- var rangeEnd []byte
-
- if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
- rangeEnd = perm.RangeEnd
- }
-
- if len(perm.RangeEnd) != 0 {
- ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
- } else {
- ivl = adt.NewBytesAffinePoint(perm.Key)
- }
-
- switch perm.PermType {
- case authpb.READWRITE:
- readPerms.Insert(ivl, struct{}{})
- writePerms.Insert(ivl, struct{}{})
-
- case authpb.READ:
- readPerms.Insert(ivl, struct{}{})
-
- case authpb.WRITE:
- writePerms.Insert(ivl, struct{}{})
- }
- }
- }
-
- return &unifiedRangePermissions{
- readPerms: readPerms,
- writePerms: writePerms,
- }
-}
-
-func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
- if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
- rangeEnd = nil
- }
-
- ivl := adt.NewBytesAffineInterval(key, rangeEnd)
- switch permtyp {
- case authpb.READ:
- return cachedPerms.readPerms.Contains(ivl)
- case authpb.WRITE:
- return cachedPerms.writePerms.Contains(ivl)
- default:
- plog.Panicf("unknown auth type: %v", permtyp)
- }
- return false
-}
-
-func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
- pt := adt.NewBytesAffinePoint(key)
- switch permtyp {
- case authpb.READ:
- return cachedPerms.readPerms.Intersects(pt)
- case authpb.WRITE:
- return cachedPerms.writePerms.Intersects(pt)
- default:
- plog.Panicf("unknown auth type: %v", permtyp)
- }
- return false
-}
-
-func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
- // assumption: tx is Lock()ed
- _, ok := as.rangePermCache[userName]
- if !ok {
- perms := getMergedPerms(tx, userName)
- if perms == nil {
- plog.Errorf("failed to create a unified permission of user %s", userName)
- return false
- }
- as.rangePermCache[userName] = perms
- }
-
- if len(rangeEnd) == 0 {
- return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
- }
-
- return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
-}
-
-func (as *authStore) clearCachedPerm() {
- as.rangePermCache = make(map[string]*unifiedRangePermissions)
-}
-
-func (as *authStore) invalidateCachedPerm(userName string) {
- delete(as.rangePermCache, userName)
-}
-
-type unifiedRangePermissions struct {
- readPerms *adt.IntervalTree
- writePerms *adt.IntervalTree
-}
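Note (illustrative, not part of this change): the removed range_perm_cache.go merges a user's granted key ranges into two interval trees (read and write) and answers permission checks with Contains for ranges and Intersects for single keys. The sketch below exercises just that pkg/adt usage with a made-up grant.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt"
)

func main() {
	// Grant READ on the key range ["a", "c"), the way getMergedPerms populates its trees.
	readPerms := &adt.IntervalTree{}
	readPerms.Insert(adt.NewBytesAffineInterval([]byte("a"), []byte("c")), struct{}{})

	// Range request ["a", "b"): permitted only if fully covered (see checkKeyInterval).
	req := adt.NewBytesAffineInterval([]byte("a"), []byte("b"))
	fmt.Println("range [a,b) permitted:", readPerms.Contains(req))

	// Single-key requests: permitted if any granted interval covers the point (see checkKeyPoint).
	fmt.Println("key b permitted:", readPerms.Intersects(adt.NewBytesAffinePoint([]byte("b"))))
	fmt.Println("key d permitted:", readPerms.Intersects(adt.NewBytesAffinePoint([]byte("d"))))
}
```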
diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go
deleted file mode 100644
index ac55ad7..0000000
--- a/vendor/github.com/coreos/etcd/auth/simple_token.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-// CAUTION: This random-number-based token mechanism is only for testing purposes.
-// A JWT-based mechanism will be added in the near future.
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "math/big"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-const (
- letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
- defaultSimpleTokenLength = 16
-)
-
-// var for testing purposes
-var (
- simpleTokenTTL = 5 * time.Minute
- simpleTokenTTLResolution = 1 * time.Second
-)
-
-type simpleTokenTTLKeeper struct {
- tokens map[string]time.Time
- donec chan struct{}
- stopc chan struct{}
- deleteTokenFunc func(string)
- mu *sync.Mutex
-}
-
-func (tm *simpleTokenTTLKeeper) stop() {
- select {
- case tm.stopc <- struct{}{}:
- case <-tm.donec:
- }
- <-tm.donec
-}
-
-func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
- tm.tokens[token] = time.Now().Add(simpleTokenTTL)
-}
-
-func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
- if _, ok := tm.tokens[token]; ok {
- tm.tokens[token] = time.Now().Add(simpleTokenTTL)
- }
-}
-
-func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
- delete(tm.tokens, token)
-}
-
-func (tm *simpleTokenTTLKeeper) run() {
- tokenTicker := time.NewTicker(simpleTokenTTLResolution)
- defer func() {
- tokenTicker.Stop()
- close(tm.donec)
- }()
- for {
- select {
- case <-tokenTicker.C:
- nowtime := time.Now()
- tm.mu.Lock()
- for t, tokenendtime := range tm.tokens {
- if nowtime.After(tokenendtime) {
- tm.deleteTokenFunc(t)
- delete(tm.tokens, t)
- }
- }
- tm.mu.Unlock()
- case <-tm.stopc:
- return
- }
- }
-}
-
-type tokenSimple struct {
- indexWaiter func(uint64) <-chan struct{}
- simpleTokenKeeper *simpleTokenTTLKeeper
- simpleTokensMu sync.Mutex
- simpleTokens map[string]string // token -> username
-}
-
-func (t *tokenSimple) genTokenPrefix() (string, error) {
- ret := make([]byte, defaultSimpleTokenLength)
-
- for i := 0; i < defaultSimpleTokenLength; i++ {
- bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
- if err != nil {
- return "", err
- }
-
- ret[i] = letters[bInt.Int64()]
- }
-
- return string(ret), nil
-}
-
-func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
- t.simpleTokensMu.Lock()
- defer t.simpleTokensMu.Unlock()
- if t.simpleTokenKeeper == nil {
- return
- }
-
- _, ok := t.simpleTokens[token]
- if ok {
-		plog.Panicf("token %s is already used", token)
- }
-
- t.simpleTokens[token] = username
- t.simpleTokenKeeper.addSimpleToken(token)
-}
-
-func (t *tokenSimple) invalidateUser(username string) {
- if t.simpleTokenKeeper == nil {
- return
- }
- t.simpleTokensMu.Lock()
- for token, name := range t.simpleTokens {
- if strings.Compare(name, username) == 0 {
- delete(t.simpleTokens, token)
- t.simpleTokenKeeper.deleteSimpleToken(token)
- }
- }
- t.simpleTokensMu.Unlock()
-}
-
-func (t *tokenSimple) enable() {
- delf := func(tk string) {
- if username, ok := t.simpleTokens[tk]; ok {
- plog.Infof("deleting token %s for user %s", tk, username)
- delete(t.simpleTokens, tk)
- }
- }
- t.simpleTokenKeeper = &simpleTokenTTLKeeper{
- tokens: make(map[string]time.Time),
- donec: make(chan struct{}),
- stopc: make(chan struct{}),
- deleteTokenFunc: delf,
- mu: &t.simpleTokensMu,
- }
- go t.simpleTokenKeeper.run()
-}
-
-func (t *tokenSimple) disable() {
- t.simpleTokensMu.Lock()
- tk := t.simpleTokenKeeper
- t.simpleTokenKeeper = nil
- t.simpleTokens = make(map[string]string) // invalidate all tokens
- t.simpleTokensMu.Unlock()
- if tk != nil {
- tk.stop()
- }
-}
-
-func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
- if !t.isValidSimpleToken(ctx, token) {
- return nil, false
- }
- t.simpleTokensMu.Lock()
- username, ok := t.simpleTokens[token]
- if ok && t.simpleTokenKeeper != nil {
- t.simpleTokenKeeper.resetSimpleToken(token)
- }
- t.simpleTokensMu.Unlock()
- return &AuthInfo{Username: username, Revision: revision}, ok
-}
-
-func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
- // rev isn't used in simple token, it is only used in JWT
- index := ctx.Value(AuthenticateParamIndex{}).(uint64)
- simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
- token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
- t.assignSimpleTokenToUser(username, token)
-
- return token, nil
-}
-
-func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
- splitted := strings.Split(token, ".")
- if len(splitted) != 2 {
- return false
- }
- index, err := strconv.Atoi(splitted[1])
- if err != nil {
- return false
- }
-
- select {
- case <-t.indexWaiter(uint64(index)):
- return true
- case <-ctx.Done():
- }
-
- return false
-}
-
-func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
- return &tokenSimple{
- simpleTokens: make(map[string]string),
- indexWaiter: indexWaiter,
- }
-}
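Note (illustrative, not part of this change): the removed simple-token provider issues tokens of the form `<16-character random prefix>.<raft index>`, and isValidSimpleToken accepts a token only if it splits cleanly on the dot and the index has been applied. The standalone sketch below checks just the format; the apply-wait and TTL bookkeeping are left out, and the helper name is invented for the example.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSimpleToken splits a "<prefix>.<index>" token the way isValidSimpleToken does,
// minus the wait for the raft index to be applied.
func parseSimpleToken(token string) (prefix string, index uint64, ok bool) {
	parts := strings.Split(token, ".")
	if len(parts) != 2 {
		return "", 0, false
	}
	idx, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return "", 0, false
	}
	return parts[0], idx, true
}

func main() {
	fmt.Println(parseSimpleToken("abcdEFGHijklMNOP.42")) // abcdEFGHijklMNOP 42 true
	fmt.Println(parseSimpleToken("not-a-token"))         // "" 0 false
}
```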
diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go
deleted file mode 100644
index d676cb5..0000000
--- a/vendor/github.com/coreos/etcd/auth/store.go
+++ /dev/null
@@ -1,1136 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "errors"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
-
- "github.com/coreos/etcd/auth/authpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/backend"
-
- "github.com/coreos/pkg/capnslog"
- "golang.org/x/crypto/bcrypt"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-var (
- enableFlagKey = []byte("authEnabled")
- authEnabled = []byte{1}
- authDisabled = []byte{0}
-
- revisionKey = []byte("authRevision")
-
- authBucketName = []byte("auth")
- authUsersBucketName = []byte("authUsers")
- authRolesBucketName = []byte("authRoles")
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
-
- ErrRootUserNotExist = errors.New("auth: root user does not exist")
- ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
- ErrUserAlreadyExist = errors.New("auth: user already exists")
- ErrUserEmpty = errors.New("auth: user name is empty")
- ErrUserNotFound = errors.New("auth: user not found")
- ErrRoleAlreadyExist = errors.New("auth: role already exists")
- ErrRoleNotFound = errors.New("auth: role not found")
- ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
- ErrPermissionDenied = errors.New("auth: permission denied")
- ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
- ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
- ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
- ErrAuthOldRevision = errors.New("auth: revision in header is old")
- ErrInvalidAuthToken = errors.New("auth: invalid auth token")
- ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
- ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
-
- // BcryptCost is the algorithm cost / strength for hashing auth passwords
- BcryptCost = bcrypt.DefaultCost
-)
-
-const (
- rootUser = "root"
- rootRole = "root"
-
- tokenTypeSimple = "simple"
- tokenTypeJWT = "jwt"
-
- revBytesLen = 8
-)
-
-type AuthInfo struct {
- Username string
- Revision uint64
-}
-
-// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate()
-type AuthenticateParamIndex struct{}
-
-// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate()
-type AuthenticateParamSimpleTokenPrefix struct{}
-
-type AuthStore interface {
- // AuthEnable turns on the authentication feature
- AuthEnable() error
-
- // AuthDisable turns off the authentication feature
- AuthDisable()
-
- // Authenticate does authentication based on given user name and password
- Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
-
- // Recover recovers the state of auth store from the given backend
- Recover(b backend.Backend)
-
- // UserAdd adds a new user
- UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
-
- // UserDelete deletes a user
- UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
-
- // UserChangePassword changes a password of a user
- UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
-
- // UserGrantRole grants a role to the user
- UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
-
- // UserGet gets the detailed information of a users
- UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
-
- // UserRevokeRole revokes a role of a user
- UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
-
- // RoleAdd adds a new role
- RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
-
- // RoleGrantPermission grants a permission to a role
- RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
-
- // RoleGet gets the detailed information of a role
- RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
-
- // RoleRevokePermission gets the detailed information of a role
- RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
-
- // RoleDelete gets the detailed information of a role
- RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
-
- // UserList gets a list of all users
- UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
-
- // RoleList gets a list of all roles
- RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-
- // IsPutPermitted checks put permission of the user
- IsPutPermitted(authInfo *AuthInfo, key []byte) error
-
- // IsRangePermitted checks range permission of the user
- IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
-
- // IsDeleteRangePermitted checks delete-range permission of the user
- IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
-
- // IsAdminPermitted checks admin permission of the user
- IsAdminPermitted(authInfo *AuthInfo) error
-
- // GenTokenPrefix produces a random string in a case of simple token
- // in a case of JWT, it produces an empty string
- GenTokenPrefix() (string, error)
-
- // Revision gets current revision of authStore
- Revision() uint64
-
- // CheckPassword checks a given pair of username and password is correct
- CheckPassword(username, password string) (uint64, error)
-
- // Close does cleanup of AuthStore
- Close() error
-
- // AuthInfoFromCtx gets AuthInfo from gRPC's context
- AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
-
- // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
- AuthInfoFromTLS(ctx context.Context) *AuthInfo
-
- // WithRoot generates and installs a token that can be used as a root credential
- WithRoot(ctx context.Context) context.Context
-
- // HasRole checks that user has role
- HasRole(user, role string) bool
-}
-
-type TokenProvider interface {
- info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
- assign(ctx context.Context, username string, revision uint64) (string, error)
- enable()
- disable()
-
- invalidateUser(string)
- genTokenPrefix() (string, error)
-}
-
-type authStore struct {
- // atomic operations; need 64-bit align, or 32-bit tests will crash
- revision uint64
-
- be backend.Backend
- enabled bool
- enabledMu sync.RWMutex
-
- rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
-
- tokenProvider TokenProvider
-}
-
-func (as *authStore) AuthEnable() error {
- as.enabledMu.Lock()
- defer as.enabledMu.Unlock()
- if as.enabled {
- plog.Noticef("Authentication already enabled")
- return nil
- }
- b := as.be
- tx := b.BatchTx()
- tx.Lock()
- defer func() {
- tx.Unlock()
- b.ForceCommit()
- }()
-
- u := getUser(tx, rootUser)
- if u == nil {
- return ErrRootUserNotExist
- }
-
- if !hasRootRole(u) {
- return ErrRootRoleNotExist
- }
-
- tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
-
- as.enabled = true
- as.tokenProvider.enable()
-
- as.rangePermCache = make(map[string]*unifiedRangePermissions)
-
- as.setRevision(getRevision(tx))
-
- plog.Noticef("Authentication enabled")
-
- return nil
-}
-
-func (as *authStore) AuthDisable() {
- as.enabledMu.Lock()
- defer as.enabledMu.Unlock()
- if !as.enabled {
- return
- }
- b := as.be
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
- as.commitRevision(tx)
- tx.Unlock()
- b.ForceCommit()
-
- as.enabled = false
- as.tokenProvider.disable()
-
- plog.Noticef("Authentication disabled")
-}
-
-func (as *authStore) Close() error {
- as.enabledMu.Lock()
- defer as.enabledMu.Unlock()
- if !as.enabled {
- return nil
- }
- as.tokenProvider.disable()
- return nil
-}
-
-func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
- if !as.isAuthEnabled() {
- return nil, ErrAuthNotEnabled
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, username)
- if user == nil {
- return nil, ErrAuthFailed
- }
-
- // Password checking is already performed in the API layer, so we don't need to check for now.
- // Staleness of password can be detected with OCC in the API layer, too.
-
- token, err := as.tokenProvider.assign(ctx, username, as.Revision())
- if err != nil {
- return nil, err
- }
-
- plog.Debugf("authorized %s, token is %s", username, token)
- return &pb.AuthenticateResponse{Token: token}, nil
-}
-
-func (as *authStore) CheckPassword(username, password string) (uint64, error) {
- if !as.isAuthEnabled() {
- return 0, ErrAuthNotEnabled
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, username)
- if user == nil {
- return 0, ErrAuthFailed
- }
-
- if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
- plog.Noticef("authentication failed, invalid password for user %s", username)
- return 0, ErrAuthFailed
- }
-
- return getRevision(tx), nil
-}
-
-func (as *authStore) Recover(be backend.Backend) {
- enabled := false
- as.be = be
- tx := be.BatchTx()
- tx.Lock()
- _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
- if len(vs) == 1 {
- if bytes.Equal(vs[0], authEnabled) {
- enabled = true
- }
- }
-
- as.setRevision(getRevision(tx))
-
- tx.Unlock()
-
- as.enabledMu.Lock()
- as.enabled = enabled
- as.enabledMu.Unlock()
-}
-
-func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- if len(r.Name) == 0 {
- return nil, ErrUserEmpty
- }
-
- hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
- if err != nil {
- plog.Errorf("failed to hash password: %s", err)
- return nil, err
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user != nil {
- return nil, ErrUserAlreadyExist
- }
-
- newUser := &authpb.User{
- Name: []byte(r.Name),
- Password: hashed,
- }
-
- putUser(tx, newUser)
-
- as.commitRevision(tx)
-
- plog.Noticef("added a new user: %s", r.Name)
-
- return &pb.AuthUserAddResponse{}, nil
-}
-
-func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- if as.enabled && strings.Compare(r.Name, rootUser) == 0 {
- plog.Errorf("the user root must not be deleted")
- return nil, ErrInvalidAuthMgmt
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- delUser(tx, r.Name)
-
- as.commitRevision(tx)
-
- as.invalidateCachedPerm(r.Name)
- as.tokenProvider.invalidateUser(r.Name)
-
- plog.Noticef("deleted a user: %s", r.Name)
-
- return &pb.AuthUserDeleteResponse{}, nil
-}
-
-func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
- // If the cost is too high, we should move the encryption to outside of the raft
- hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
- if err != nil {
- plog.Errorf("failed to hash password: %s", err)
- return nil, err
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- updatedUser := &authpb.User{
- Name: []byte(r.Name),
- Roles: user.Roles,
- Password: hashed,
- }
-
- putUser(tx, updatedUser)
-
- as.commitRevision(tx)
-
- as.invalidateCachedPerm(r.Name)
- as.tokenProvider.invalidateUser(r.Name)
-
- plog.Noticef("changed a password of a user: %s", r.Name)
-
- return &pb.AuthUserChangePasswordResponse{}, nil
-}
-
-func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.User)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- if r.Role != rootRole {
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
- }
-
- idx := sort.SearchStrings(user.Roles, r.Role)
- if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 {
- plog.Warningf("user %s is already granted role %s", r.User, r.Role)
- return &pb.AuthUserGrantRoleResponse{}, nil
- }
-
- user.Roles = append(user.Roles, r.Role)
- sort.Strings(user.Roles)
-
- putUser(tx, user)
-
- as.invalidateCachedPerm(r.User)
-
- as.commitRevision(tx)
-
- plog.Noticef("granted role %s to user %s", r.Role, r.User)
- return &pb.AuthUserGrantRoleResponse{}, nil
-}
-
-func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- user := getUser(tx, r.Name)
- tx.Unlock()
-
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- var resp pb.AuthUserGetResponse
- resp.Roles = append(resp.Roles, user.Roles...)
- return &resp, nil
-}
-
-func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- users := getAllUsers(tx)
- tx.Unlock()
-
- resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
- for i := range users {
- resp.Users[i] = string(users[i].Name)
- }
- return resp, nil
-}
-
-func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 {
- plog.Errorf("the role root must not be revoked from the user root")
- return nil, ErrInvalidAuthMgmt
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- updatedUser := &authpb.User{
- Name: user.Name,
- Password: user.Password,
- }
-
- for _, role := range user.Roles {
- if strings.Compare(role, r.Role) != 0 {
- updatedUser.Roles = append(updatedUser.Roles, role)
- }
- }
-
- if len(updatedUser.Roles) == len(user.Roles) {
- return nil, ErrRoleNotGranted
- }
-
- putUser(tx, updatedUser)
-
- as.invalidateCachedPerm(r.Name)
-
- as.commitRevision(tx)
-
- plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
- return &pb.AuthUserRevokeRoleResponse{}, nil
-}
-
-func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- var resp pb.AuthRoleGetResponse
-
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
- resp.Perm = append(resp.Perm, role.KeyPermission...)
- return &resp, nil
-}
-
-func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- roles := getAllRoles(tx)
- tx.Unlock()
-
- resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
- for i := range roles {
- resp.Roles[i] = string(roles[i].Name)
- }
- return resp, nil
-}
-
-func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
-
- updatedRole := &authpb.Role{
- Name: role.Name,
- }
-
- for _, perm := range role.KeyPermission {
- if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) {
- updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
- }
- }
-
- if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
- return nil, ErrPermissionNotGranted
- }
-
- putRole(tx, updatedRole)
-
- // TODO(mitake): currently single role update invalidates every cache
- // It should be optimized.
- as.clearCachedPerm()
-
- as.commitRevision(tx)
-
- plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
- return &pb.AuthRoleRevokePermissionResponse{}, nil
-}
-
-func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- if as.enabled && strings.Compare(r.Role, rootRole) == 0 {
- plog.Errorf("the role root must not be deleted")
- return nil, ErrInvalidAuthMgmt
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
-
- delRole(tx, r.Role)
-
- users := getAllUsers(tx)
- for _, user := range users {
- updatedUser := &authpb.User{
- Name: user.Name,
- Password: user.Password,
- }
-
- for _, role := range user.Roles {
- if strings.Compare(role, r.Role) != 0 {
- updatedUser.Roles = append(updatedUser.Roles, role)
- }
- }
-
- if len(updatedUser.Roles) == len(user.Roles) {
- continue
- }
-
- putUser(tx, updatedUser)
-
- as.invalidateCachedPerm(string(user.Name))
- }
-
- as.commitRevision(tx)
-
- plog.Noticef("deleted role %s", r.Role)
- return &pb.AuthRoleDeleteResponse{}, nil
-}
-
-func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Name)
- if role != nil {
- return nil, ErrRoleAlreadyExist
- }
-
- newRole := &authpb.Role{
- Name: []byte(r.Name),
- }
-
- putRole(tx, newRole)
-
- as.commitRevision(tx)
-
- plog.Noticef("Role %s is created", r.Name)
-
- return &pb.AuthRoleAddResponse{}, nil
-}
-
-func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
- return as.tokenProvider.info(ctx, token, as.Revision())
-}
-
-type permSlice []*authpb.Permission
-
-func (perms permSlice) Len() int {
- return len(perms)
-}
-
-func (perms permSlice) Less(i, j int) bool {
- return bytes.Compare(perms[i].Key, perms[j].Key) < 0
-}
-
-func (perms permSlice) Swap(i, j int) {
- perms[i], perms[j] = perms[j], perms[i]
-}
-
-func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Name)
- if role == nil {
- return nil, ErrRoleNotFound
- }
-
- idx := sort.Search(len(role.KeyPermission), func(i int) bool {
- return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0
- })
-
- if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
- // update existing permission
- role.KeyPermission[idx].PermType = r.Perm.PermType
- } else {
- // append new permission to the role
- newPerm := &authpb.Permission{
- Key: []byte(r.Perm.Key),
- RangeEnd: []byte(r.Perm.RangeEnd),
- PermType: r.Perm.PermType,
- }
-
- role.KeyPermission = append(role.KeyPermission, newPerm)
- sort.Sort(permSlice(role.KeyPermission))
- }
-
- putRole(tx, role)
-
- // TODO(mitake): currently single role update invalidates every cache
- // It should be optimized.
- as.clearCachedPerm()
-
- as.commitRevision(tx)
-
- plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
-
- return &pb.AuthRoleGrantPermissionResponse{}, nil
-}
-
-func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
- // TODO(mitake): this function would be costly so we need a caching mechanism
- if !as.isAuthEnabled() {
- return nil
- }
-
- // only gets rev == 0 when passed AuthInfo{}; no user given
- if revision == 0 {
- return ErrUserEmpty
- }
-
- if revision < as.Revision() {
- return ErrAuthOldRevision
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, userName)
- if user == nil {
- plog.Errorf("invalid user name %s for permission checking", userName)
- return ErrPermissionDenied
- }
-
- // root role should have permission on all ranges
- if hasRootRole(user) {
- return nil
- }
-
- if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
- return nil
- }
-
- return ErrPermissionDenied
-}
-
-func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
- return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
-}
-
-func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
- return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
-}
-
-func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
- return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
-}
-
-func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
- if !as.isAuthEnabled() {
- return nil
- }
- if authInfo == nil {
- return ErrUserEmpty
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- u := getUser(tx, authInfo.Username)
- tx.Unlock()
-
- if u == nil {
- return ErrUserNotFound
- }
-
- if !hasRootRole(u) {
- return ErrPermissionDenied
- }
-
- return nil
-}
-
-func getUser(tx backend.BatchTx, username string) *authpb.User {
- _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
- if len(vs) == 0 {
- return nil
- }
-
- user := &authpb.User{}
- err := user.Unmarshal(vs[0])
- if err != nil {
- plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err)
- }
- return user
-}
-
-func getAllUsers(tx backend.BatchTx) []*authpb.User {
- _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
- if len(vs) == 0 {
- return nil
- }
-
- users := make([]*authpb.User, len(vs))
- for i := range vs {
- user := &authpb.User{}
- err := user.Unmarshal(vs[i])
- if err != nil {
- plog.Panicf("failed to unmarshal user struct: %s", err)
- }
- users[i] = user
- }
- return users
-}
-
-func putUser(tx backend.BatchTx, user *authpb.User) {
- b, err := user.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err)
- }
- tx.UnsafePut(authUsersBucketName, user.Name, b)
-}
-
-func delUser(tx backend.BatchTx, username string) {
- tx.UnsafeDelete(authUsersBucketName, []byte(username))
-}
-
-func getRole(tx backend.BatchTx, rolename string) *authpb.Role {
- _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
- if len(vs) == 0 {
- return nil
- }
-
- role := &authpb.Role{}
- err := role.Unmarshal(vs[0])
- if err != nil {
- plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err)
- }
- return role
-}
-
-func getAllRoles(tx backend.BatchTx) []*authpb.Role {
- _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
- if len(vs) == 0 {
- return nil
- }
-
- roles := make([]*authpb.Role, len(vs))
- for i := range vs {
- role := &authpb.Role{}
- err := role.Unmarshal(vs[i])
- if err != nil {
- plog.Panicf("failed to unmarshal role struct: %s", err)
- }
- roles[i] = role
- }
- return roles
-}
-
-func putRole(tx backend.BatchTx, role *authpb.Role) {
- b, err := role.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err)
- }
-
- tx.UnsafePut(authRolesBucketName, []byte(role.Name), b)
-}
-
-func delRole(tx backend.BatchTx, rolename string) {
- tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
-}
-
-func (as *authStore) isAuthEnabled() bool {
- as.enabledMu.RLock()
- defer as.enabledMu.RUnlock()
- return as.enabled
-}
-
-func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore {
- tx := be.BatchTx()
- tx.Lock()
-
- tx.UnsafeCreateBucket(authBucketName)
- tx.UnsafeCreateBucket(authUsersBucketName)
- tx.UnsafeCreateBucket(authRolesBucketName)
-
- enabled := false
- _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
- if len(vs) == 1 {
- if bytes.Equal(vs[0], authEnabled) {
- enabled = true
- }
- }
-
- as := &authStore{
- be: be,
- revision: getRevision(tx),
- enabled: enabled,
- rangePermCache: make(map[string]*unifiedRangePermissions),
- tokenProvider: tp,
- }
-
- if enabled {
- as.tokenProvider.enable()
- }
-
- if as.Revision() == 0 {
- as.commitRevision(tx)
- }
-
- tx.Unlock()
- be.ForceCommit()
-
- return as
-}
-
-func hasRootRole(u *authpb.User) bool {
- // u.Roles is sorted in UserGrantRole(), so we can use binary search.
- idx := sort.SearchStrings(u.Roles, rootRole)
- return idx != len(u.Roles) && u.Roles[idx] == rootRole
-}
-
-func (as *authStore) commitRevision(tx backend.BatchTx) {
- atomic.AddUint64(&as.revision, 1)
- revBytes := make([]byte, revBytesLen)
- binary.BigEndian.PutUint64(revBytes, as.Revision())
- tx.UnsafePut(authBucketName, revisionKey, revBytes)
-}
-
-func getRevision(tx backend.BatchTx) uint64 {
- _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
- if len(vs) != 1 {
- // this can happen in the initialization phase
- return 0
- }
-
- return binary.BigEndian.Uint64(vs[0])
-}
-
-func (as *authStore) setRevision(rev uint64) {
- atomic.StoreUint64(&as.revision, rev)
-}
-
-func (as *authStore) Revision() uint64 {
- return atomic.LoadUint64(&as.revision)
-}
-
-func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
- peer, ok := peer.FromContext(ctx)
- if !ok || peer == nil || peer.AuthInfo == nil {
- return nil
- }
-
- tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
- for _, chains := range tlsInfo.State.VerifiedChains {
- for _, chain := range chains {
- cn := chain.Subject.CommonName
- plog.Debugf("found common name %s", cn)
-
- ai := &AuthInfo{
- Username: cn,
- Revision: as.Revision(),
- }
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
- return nil
- }
-
- // A gRPC-gateway proxy request to the etcd server includes a Grpcgateway-Accept
- // header. The proxy uses the etcd client's server certificate. If that certificate
- // has a CommonName, we should never use it for authentication.
- if gw := md["grpcgateway-accept"]; len(gw) > 0 {
- plog.Warningf("ignoring common name in gRPC-gateway proxy request %s", ai.Username)
- return nil
- }
- return ai
- }
- }
-
- return nil
-}
-
-func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
- return nil, nil
- }
-
- //TODO(mitake|hexfusion) review unifying key names
- ts, ok := md["token"]
- if !ok {
- ts, ok = md["authorization"]
- }
- if !ok {
- return nil, nil
- }
-
- token := ts[0]
- authInfo, uok := as.authInfoFromToken(ctx, token)
- if !uok {
- plog.Warningf("invalid auth token: %s", token)
- return nil, ErrInvalidAuthToken
- }
-
- return authInfo, nil
-}
-
-func (as *authStore) GenTokenPrefix() (string, error) {
- return as.tokenProvider.genTokenPrefix()
-}
-
-func decomposeOpts(optstr string) (string, map[string]string, error) {
- opts := strings.Split(optstr, ",")
- tokenType := opts[0]
-
- typeSpecificOpts := make(map[string]string)
- for i := 1; i < len(opts); i++ {
- pair := strings.Split(opts[i], "=")
-
- if len(pair) != 2 {
- plog.Errorf("invalid token specific option: %s", optstr)
- return "", nil, ErrInvalidAuthOpts
- }
-
- if _, ok := typeSpecificOpts[pair[0]]; ok {
- plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr)
- return "", nil, ErrInvalidAuthOpts
- }
-
- typeSpecificOpts[pair[0]] = pair[1]
- }
-
- return tokenType, typeSpecificOpts, nil
-
-}
-
-func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
- tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts)
- if err != nil {
- return nil, ErrInvalidAuthOpts
- }
-
- switch tokenType {
- case tokenTypeSimple:
- plog.Warningf("simple token is not cryptographically signed")
- return newTokenProviderSimple(indexWaiter), nil
-
- case tokenTypeJWT:
- return newTokenProviderJWT(typeSpecificOpts)
-
- case "":
- return newTokenProviderNop()
- default:
- plog.Errorf("unknown token type: %s", tokenType)
- return nil, ErrInvalidAuthOpts
- }
-}
-
-func (as *authStore) WithRoot(ctx context.Context) context.Context {
- if !as.isAuthEnabled() {
- return ctx
- }
-
- var ctxForAssign context.Context
- if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil {
- ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
- prefix, err := ts.genTokenPrefix()
- if err != nil {
- plog.Errorf("failed to generate prefix of internally used token")
- return ctx
- }
- ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
- } else {
- ctxForAssign = ctx
- }
-
- token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision())
- if err != nil {
- // this must not happen
- plog.Errorf("failed to assign token for lease revoking: %s", err)
- return ctx
- }
-
- mdMap := map[string]string{
- "token": token,
- }
- tokenMD := metadata.New(mdMap)
-
- // use "mdIncomingKey{}" since it's called from local etcdserver
- return metadata.NewIncomingContext(ctx, tokenMD)
-}
-
-func (as *authStore) HasRole(user, role string) bool {
- tx := as.be.BatchTx()
- tx.Lock()
- u := getUser(tx, user)
- tx.Unlock()
-
- if u == nil {
- plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user)
- return false
- }
-
- for _, r := range u.Roles {
- if role == r {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/coreos/etcd/bill-of-materials.json b/vendor/github.com/coreos/etcd/bill-of-materials.json
deleted file mode 100644
index 98e2822..0000000
--- a/vendor/github.com/coreos/etcd/bill-of-materials.json
+++ /dev/null
@@ -1,451 +0,0 @@
-[
- {
- "project": "bitbucket.org/ww/goautoneg",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/beorn7/perks/quantile",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/bgentry/speakeasy",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9441624365482234
- }
- ]
- },
- {
- "project": "github.com/coreos/bbolt",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/coreos/etcd",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/coreos/go-semver/semver",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/coreos/go-systemd",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9966703662597114
- }
- ]
- },
- {
- "project": "github.com/coreos/pkg",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/cpuguy83/go-md2man/md2man",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/dgrijalva/jwt-go",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/dustin/go-humanize",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.96875
- }
- ]
- },
- {
- "project": "github.com/ghodss/yaml",
- "licenses": [
- {
- "type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/gogo/protobuf",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9090909090909091
- }
- ]
- },
- {
- "project": "github.com/golang/groupcache/lru",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9966703662597114
- }
- ]
- },
- {
- "project": "github.com/golang/protobuf",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.92
- }
- ]
- },
- {
- "project": "github.com/google/btree",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/gorilla/websocket",
- "licenses": [
- {
- "type": "BSD 2-clause \"Simplified\" License",
- "confidence": 0.9852216748768473
- }
- ]
- },
- {
- "project": "github.com/grpc-ecosystem/go-grpc-prometheus",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/grpc-ecosystem/grpc-gateway",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.979253112033195
- }
- ]
- },
- {
- "project": "github.com/inconshreveable/mousetrap",
- "licenses": [
- {
- "type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 1
- },
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/jonboulle/clockwork",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/mattn/go-runewidth",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/matttproud/golang_protobuf_extensions/pbutil",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/olekukonko/tablewriter",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/prometheus/client_golang/prometheus",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/prometheus/client_model/go",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/prometheus/common",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/prometheus/procfs",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/russross/blackfriday",
- "licenses": [
- {
- "type": "BSD 2-clause \"Simplified\" License",
- "confidence": 0.9626168224299065
- }
- ]
- },
- {
- "project": "github.com/sirupsen/logrus",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/soheilhy/cmux",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/spf13/cobra",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 0.9573241061130334
- }
- ]
- },
- {
- "project": "github.com/spf13/pflag",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "github.com/tmc/grpc-websocket-proxy/wsproxy",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "github.com/ugorji/go/codec",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9946524064171123
- }
- ]
- },
- {
- "project": "github.com/urfave/cli",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "github.com/xiang90/probing",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 1
- }
- ]
- },
- {
- "project": "go.uber.org/atomic",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "go.uber.org/multierr",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "go.uber.org/zap",
- "licenses": [
- {
- "type": "MIT License",
- "confidence": 0.9891304347826086
- }
- ]
- },
- {
- "project": "golang.org/x/crypto",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/net",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/sys/unix",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/text",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "golang.org/x/time/rate",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9663865546218487
- }
- ]
- },
- {
- "project": "google.golang.org/genproto/googleapis/rpc/status",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "google.golang.org/grpc",
- "licenses": [
- {
- "type": "Apache License 2.0",
- "confidence": 1
- }
- ]
- },
- {
- "project": "gopkg.in/cheggaaa/pb.v1",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License",
- "confidence": 0.9916666666666667
- }
- ]
- },
- {
- "project": "gopkg.in/yaml.v2",
- "licenses": [
- {
- "type": "The Unlicense",
- "confidence": 0.35294117647058826
- },
- {
- "type": "MIT License",
- "confidence": 0.8975609756097561
- }
- ]
- }
-]
diff --git a/vendor/github.com/coreos/etcd/bill-of-materials.override.json b/vendor/github.com/coreos/etcd/bill-of-materials.override.json
deleted file mode 100644
index 34de90e..0000000
--- a/vendor/github.com/coreos/etcd/bill-of-materials.override.json
+++ /dev/null
@@ -1,26 +0,0 @@
-[
- {
- "project": "bitbucket.org/ww/goautoneg",
- "licenses": [
- {
- "type": "BSD 3-clause \"New\" or \"Revised\" License"
- }
- ]
- },
- {
- "project": "github.com/ghodss/yaml",
- "licenses": [
- {
- "type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License"
- }
- ]
- },
- {
- "project": "github.com/inconshreveable/mousetrap",
- "licenses": [
- {
- "type": "Apache License 2.0"
- }
- ]
- }
-]
diff --git a/vendor/github.com/coreos/etcd/build b/vendor/github.com/coreos/etcd/build
deleted file mode 100755
index b233d32..0000000
--- a/vendor/github.com/coreos/etcd/build
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/sh -e
-
-# set some environment variables
-ORG_PATH="github.com/coreos"
-REPO_PATH="${ORG_PATH}/etcd"
-
-GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound")
-if [ ! -z "$FAILPOINTS" ]; then
- GIT_SHA="$GIT_SHA"-FAILPOINTS
-fi
-
-# Set GO_LDFLAGS="-s" for building without symbols for debugging.
-GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=${GIT_SHA}"
-
-# enable/disable failpoints
-toggle_failpoints() {
- mode="$1"
- if which gofail >/dev/null 2>&1; then
- gofail "$mode" etcdserver/ mvcc/backend/
- elif [ "$mode" != "disable" ]; then
- echo "FAILPOINTS set but gofail not found"
- exit 1
- fi
-}
-
-toggle_failpoints_default() {
- mode="disable"
- if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi
- toggle_failpoints "$mode"
-}
-
-etcd_build() {
- out="bin"
- if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi
- toggle_failpoints_default
- # Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK
-
- # shellcheck disable=SC2086
- CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o "${out}/etcd" ${REPO_PATH}/cmd/etcd || return
- # shellcheck disable=SC2086
- CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o "${out}/etcdctl" ${REPO_PATH}/cmd/etcdctl || return
-}
-
-etcd_setup_gopath() {
- d=$(dirname "$0")
- CDIR=$(cd "$d" && pwd)
- cd "$CDIR"
- etcdGOPATH="${CDIR}/gopath"
- # preserve old gopath to support building with unvendored tooling deps (e.g., gofail)
- if [ -n "$GOPATH" ]; then
- GOPATH=":$GOPATH"
- fi
- export GOPATH=${etcdGOPATH}$GOPATH
- rm -rf "${etcdGOPATH}/src"
- mkdir -p "${etcdGOPATH}"
- ln -s "${CDIR}/cmd/vendor" "${etcdGOPATH}/src"
-}
-
-toggle_failpoints_default
-
-# only build when called directly, not sourced
-if echo "$0" | grep "build$" >/dev/null; then
- # force new gopath so builds outside of gopath work
- etcd_setup_gopath
- etcd_build
-fi
diff --git a/vendor/github.com/coreos/etcd/build.bat b/vendor/github.com/coreos/etcd/build.bat
deleted file mode 100755
index ff9b209..0000000
--- a/vendor/github.com/coreos/etcd/build.bat
+++ /dev/null
@@ -1 +0,0 @@
-powershell -ExecutionPolicy Bypass -File build.ps1
diff --git a/vendor/github.com/coreos/etcd/build.ps1 b/vendor/github.com/coreos/etcd/build.ps1
deleted file mode 100644
index 455d37d..0000000
--- a/vendor/github.com/coreos/etcd/build.ps1
+++ /dev/null
@@ -1,81 +0,0 @@
-$ORG_PATH="github.com/coreos"
-$REPO_PATH="$ORG_PATH/etcd"
-$PWD = $((Get-Item -Path ".\" -Verbose).FullName)
-$FSROOT = $((Get-Location).Drive.Name+":")
-$FSYS = $((Get-WMIObject win32_logicaldisk -filter "DeviceID = '$FSROOT'").filesystem)
-
-if ($FSYS.StartsWith("FAT","CurrentCultureIgnoreCase")) {
- echo "Error: Cannot build etcd using the $FSYS filesystem (use NTFS instead)"
- exit 1
-}
-
-# Set $Env:GO_LDFLAGS="-s" for building without symbols.
-$GO_LDFLAGS="$Env:GO_LDFLAGS -X $REPO_PATH/cmd/vendor/$REPO_PATH/version.GitSHA=$GIT_SHA"
-
-# rebuild symlinks
-git ls-files -s cmd | select-string -pattern 120000 | ForEach {
- $l = $_.ToString()
- $lnkname = $l.Split(' ')[1]
- $target = "$(git log -p HEAD -- $lnkname | select -last 2 | select -first 1)"
- $target = $target.SubString(1,$target.Length-1).Replace("/","\")
- $lnkname = $lnkname.Replace("/","\")
-
- $terms = $lnkname.Split("\")
- $dirname = $terms[0..($terms.length-2)] -join "\"
- $lnkname = "$PWD\$lnkname"
- $targetAbs = "$((Get-Item -Path "$dirname\$target").FullName)"
- $targetAbs = $targetAbs.Replace("/", "\")
-
- if (test-path -pathtype container "$targetAbs") {
- if (Test-Path "$lnkname") {
- if ((Get-Item "$lnkname") -is [System.IO.DirectoryInfo]) {
- # rd so deleting junction doesn't take files with it
- cmd /c rd "$lnkname"
- }
- }
- if (Test-Path "$lnkname") {
- if (!((Get-Item "$lnkname") -is [System.IO.DirectoryInfo])) {
- cmd /c del /A /F "$lnkname"
- }
- }
- cmd /c mklink /J "$lnkname" "$targetAbs" ">NUL"
- } else {
- # Remove file with symlink data (first run)
- if (Test-Path "$lnkname") {
- cmd /c del /A /F "$lnkname"
- }
- cmd /c mklink /H "$lnkname" "$targetAbs" ">NUL"
- }
-}
-
-if (-not $env:GOPATH) {
- $orgpath="$PWD\gopath\src\" + $ORG_PATH.Replace("/", "\")
- if (Test-Path "$orgpath\etcd") {
- if ((Get-Item "$orgpath\etcd") -is [System.IO.DirectoryInfo]) {
- # rd so deleting junction doesn't take files with it
- cmd /c rd "$orgpath\etcd"
- }
- }
- if (Test-Path "$orgpath") {
- if ((Get-Item "$orgpath") -is [System.IO.DirectoryInfo]) {
- # rd so deleting junction doesn't take files with it
- cmd /c rd "$orgpath"
- }
- }
- if (Test-Path "$orgpath") {
- if (!((Get-Item "$orgpath") -is [System.IO.DirectoryInfo])) {
- # Remove file with symlink data (first run)
- cmd /c del /A /F "$orgpath"
- }
- }
- cmd /c mkdir "$orgpath"
- cmd /c mklink /J "$orgpath\etcd" "$PWD" ">NUL"
- $env:GOPATH = "$PWD\gopath"
-}
-
-# Static compilation is useful when etcd is run in a container
-$env:CGO_ENABLED = 0
-$env:GO15VENDOREXPERIMENT = 1
-$GIT_SHA="$(git rev-parse --short HEAD)"
-go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcd.exe "$REPO_PATH\cmd\etcd"
-go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcdctl.exe "$REPO_PATH\cmd\etcdctl"
diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md
deleted file mode 100644
index 2be731e..0000000
--- a/vendor/github.com/coreos/etcd/client/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# etcd/client
-
-etcd/client is the Go client library for etcd.
-
-[](https://godoc.org/github.com/coreos/etcd/client)
-
-etcd uses `cmd/vendor` directory to store external dependencies, which are
-to be compiled into etcd release binaries. `client` can be imported without
-vendoring. For full compatibility, it is recommended to vendor builds using
-etcd's vendored packages, using tools like godep, as in
-[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
-For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
-
-## Install
-
-```bash
-go get github.com/coreos/etcd/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
- "log"
- "time"
- "context"
-
- "github.com/coreos/etcd/client"
-)
-
-func main() {
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: client.DefaultTransport,
- // set timeout per request to fail fast when the target endpoint is unavailable
- HeaderTimeoutPerRequest: time.Second,
- }
- c, err := client.New(cfg)
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
- // set "/foo" key with "bar" value
- log.Print("Setting '/foo' key with 'bar' value")
- resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Set is done. Metadata is %q\n", resp)
- }
- // get "/foo" key's value
- log.Print("Getting '/foo' key value")
- resp, err = kapi.Get(context.Background(), "/foo", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Get is done. Metadata is %q\n", resp)
- // print value
- log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
- }
-}
-```
-
-## Error Handling
-
-etcd client might return three types of errors.
-
-- context error
-
-Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned regardless of any internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error will be added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
-
-Here is the example code to handle client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
- log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
- if err == context.Canceled {
- // ctx is canceled by another routine
- } else if err == context.DeadlineExceeded {
- // ctx is attached with a deadline and it exceeded
- } else if cerr, ok := err.(*client.ClusterError); ok {
- // process (cerr.Errors)
- } else {
- // bad cluster endpoints, which are not etcd servers
- }
-}
-```
-
-
-## Caveat
-
-1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors that occurred.
-
-3. By default, etcd/client cannot currently handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't a high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go
deleted file mode 100644
index b6ba7e1..0000000
--- a/vendor/github.com/coreos/etcd/client/auth_role.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
-)
-
-type Role struct {
- Role string `json:"role"`
- Permissions Permissions `json:"permissions"`
- Grant *Permissions `json:"grant,omitempty"`
- Revoke *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
- KV rwPermission `json:"kv"`
-}
-
-type rwPermission struct {
- Read []string `json:"read"`
- Write []string `json:"write"`
-}
-
-type PermissionType int
-
-const (
- ReadPermission PermissionType = iota
- WritePermission
- ReadWritePermission
-)
-
-// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
-// interact with etcd's role creation and modification features.
-func NewAuthRoleAPI(c Client) AuthRoleAPI {
- return &httpAuthRoleAPI{
- client: c,
- }
-}
-
-type AuthRoleAPI interface {
- // AddRole adds a role.
- AddRole(ctx context.Context, role string) error
-
- // RemoveRole removes a role.
- RemoveRole(ctx context.Context, role string) error
-
- // GetRole retrieves role details.
- GetRole(ctx context.Context, role string) (*Role, error)
-
- // GrantRoleKV grants a role some permission prefixes for the KV store.
- GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
- RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // ListRoles lists roles.
- ListRoles(ctx context.Context) ([]string, error)
-}
-
-type httpAuthRoleAPI struct {
- client httpClient
-}
-
-type authRoleAPIAction struct {
- verb string
- name string
- role *Role
-}
-
-type authRoleAPIList struct{}
-
-func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", l.name)
- if l.role == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.role)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
- resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
- var roleList struct {
- Roles []Role `json:"roles"`
- }
- if err = json.Unmarshal(body, &roleList); err != nil {
- return nil, err
- }
- ret := make([]string, 0, len(roleList.Roles))
- for _, r := range roleList.Roles {
- ret = append(ret, r.Role)
- }
- return ret, nil
-}
-
-func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
- role := &Role{
- Role: rolename,
- }
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "DELETE",
- name: rolename,
- })
-}
-
-func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err := json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "GET",
- name: rolename,
- })
-}
-
-func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
- var out rwPermission
- switch permType {
- case ReadPermission:
- out.Read = prefixes
- case WritePermission:
- out.Write = prefixes
- case ReadWritePermission:
- out.Read = prefixes
- out.Write = prefixes
- }
- return out
-}
-
-func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Grant: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Revoke: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var role Role
- if err = json.Unmarshal(body, &role); err != nil {
- return nil, err
- }
- return &role, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go
deleted file mode 100644
index 8e7e2ef..0000000
--- a/vendor/github.com/coreos/etcd/client/auth_user.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
- "path"
-)
-
-var (
- defaultV2AuthPrefix = "/v2/auth"
-)
-
-type User struct {
- User string `json:"user"`
- Password string `json:"password,omitempty"`
- Roles []string `json:"roles"`
- Grant []string `json:"grant,omitempty"`
- Revoke []string `json:"revoke,omitempty"`
-}
-
-// userListEntry is the user representation given by the server for ListUsers
-type userListEntry struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-type UserRoles struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-func v2AuthURL(ep url.URL, action string, name string) *url.URL {
- if name != "" {
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
- return &ep
- }
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
- return &ep
-}
-
-// NewAuthAPI constructs a new AuthAPI that uses HTTP to
-// interact with etcd's general auth features.
-func NewAuthAPI(c Client) AuthAPI {
- return &httpAuthAPI{
- client: c,
- }
-}
-
-type AuthAPI interface {
- // Enable auth.
- Enable(ctx context.Context) error
-
- // Disable auth.
- Disable(ctx context.Context) error
-}
-
-type httpAuthAPI struct {
- client httpClient
-}
-
-func (s *httpAuthAPI) Enable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"PUT"})
-}
-
-func (s *httpAuthAPI) Disable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"DELETE"})
-}
-
-func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
- resp, body, err := s.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-type authAPIAction struct {
- verb string
-}
-
-func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "enable", "")
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
-}
-
-type authError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e authError) Error() string {
- return e.Message
-}
-
-// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
-// interact with etcd's user creation and modification features.
-func NewAuthUserAPI(c Client) AuthUserAPI {
- return &httpAuthUserAPI{
- client: c,
- }
-}
-
-type AuthUserAPI interface {
- // AddUser adds a user.
- AddUser(ctx context.Context, username string, password string) error
-
- // RemoveUser removes a user.
- RemoveUser(ctx context.Context, username string) error
-
- // GetUser retrieves user details.
- GetUser(ctx context.Context, username string) (*User, error)
-
- // GrantUser grants a user some permission roles.
- GrantUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // RevokeUser revokes some permission roles from a user.
- RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // ChangePassword changes the user's password.
- ChangePassword(ctx context.Context, username string, password string) (*User, error)
-
- // ListUsers lists the users.
- ListUsers(ctx context.Context) ([]string, error)
-}
-
-type httpAuthUserAPI struct {
- client httpClient
-}
-
-type authUserAPIAction struct {
- verb string
- username string
- user *User
-}
-
-type authUserAPIList struct{}
-
-func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", l.username)
- if l.user == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.user)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
- resp, body, err := u.client.Do(ctx, &authUserAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
-
- var userList struct {
- Users []userListEntry `json:"users"`
- }
-
- if err = json.Unmarshal(body, &userList); err != nil {
- return nil, err
- }
-
- ret := make([]string, 0, len(userList.Users))
- for _, u := range userList.Users {
- ret = append(ret, u.User)
- }
- return ret, nil
-}
-
-func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
- user := &User{
- User: username,
- Password: password,
- }
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "DELETE",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
- return u.modUser(ctx, &authUserAPIAction{
- verb: "GET",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Grant: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Revoke: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
- user := &User{
- User: username,
- Password: password,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var user User
- if err = json.Unmarshal(body, &user); err != nil {
- var userR UserRoles
- if urerr := json.Unmarshal(body, &userR); urerr != nil {
- return nil, err
- }
- user.User = userR.User
- for _, r := range userR.Roles {
- user.Roles = append(user.Roles, r.Role)
- }
- }
- return &user, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go
deleted file mode 100644
index 76d1f04..0000000
--- a/vendor/github.com/coreos/etcd/client/cancelreq.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// borrowed from golang/net/context/ctxhttp/cancelreq.go
-
-package client
-
-import "net/http"
-
-func requestCanceler(tr CancelableTransport, req *http.Request) func() {
- ch := make(chan struct{})
- req.Cancel = ch
-
- return func() {
- close(ch)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go
deleted file mode 100644
index e687450..0000000
--- a/vendor/github.com/coreos/etcd/client/client.go
+++ /dev/null
@@ -1,710 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "math/rand"
- "net"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "sync"
- "time"
-
- "github.com/coreos/etcd/version"
-)
-
-var (
- ErrNoEndpoints = errors.New("client: no endpoints available")
- ErrTooManyRedirects = errors.New("client: too many redirects")
- ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
- ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
- errTooManyRedirectChecks = errors.New("client: too many redirect checks")
-
- // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
- // that Do() will not retry a request
- oneShotCtxValue interface{}
-)
-
-var DefaultRequestTimeout = 5 * time.Second
-
-var DefaultTransport CancelableTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
-}
-
-type EndpointSelectionMode int
-
-const (
- // EndpointSelectionRandom is the default value of the 'SelectionMode'.
- // As the name implies, the client object will pick a node from the members
- // of the cluster in a random fashion. If the cluster has three members, A, B,
- // and C, the client picks any node from its three members as its request
- // destination.
- EndpointSelectionRandom EndpointSelectionMode = iota
-
- // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
- // requests are sent directly to the cluster leader. This reduces
- // forwarding roundtrips compared to making requests to etcd followers
- // who then forward them to the cluster leader. In the event of a leader
- // failure, however, clients configured this way cannot prioritize among
- // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
- // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
- // maintain its knowledge of current cluster state.
- //
- // This mode should be used with Client.AutoSync().
- EndpointSelectionPrioritizeLeader
-)
-
-type Config struct {
- // Endpoints defines a set of URLs (schemes, hosts and ports only)
- // that can be used to communicate with a logical etcd cluster. For
- // example, a three-node cluster could be provided like so:
- //
- // Endpoints: []string{
- // "http://node1.example.com:2379",
- // "http://node2.example.com:2379",
- // "http://node3.example.com:2379",
- // }
- //
- // If multiple endpoints are provided, the Client will attempt to
- // use them all in the event that one or more of them are unusable.
- //
- // If Client.Sync is ever called, the Client may cache an alternate
- // set of endpoints to continue operation.
- Endpoints []string
-
- // Transport is used by the Client to drive HTTP requests. If not
- // provided, DefaultTransport will be used.
- Transport CancelableTransport
-
- // CheckRedirect specifies the policy for handling HTTP redirects.
- // If CheckRedirect is not nil, the Client calls it before
- // following an HTTP redirect. The sole argument is the number of
- // requests that have already been made. If CheckRedirect returns
- // an error, Client.Do will not make any further requests and return
- // the error back it to the caller.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect CheckRedirectFunc
-
- // Username specifies the user credential to add as an authorization header
- Username string
-
- // Password is the password for the specified user to add as an authorization header
- // to the request.
- Password string
-
- // HeaderTimeoutPerRequest specifies the time limit to wait for response
- // header in a single request made by the Client. The timeout includes
- // connection time, any redirects, and header wait time.
- //
- // For non-watch GET request, server returns the response body immediately.
- // For PUT/POST/DELETE request, server will attempt to commit request
- // before responding, which is expected to take `100ms + 2 * RTT`.
- // For watch request, server returns the header immediately to notify Client
- // watch start. But if server is behind some kind of proxy, the response
- // header may be cached at proxy, and Client cannot rely on this behavior.
- //
- // Especially, wait request will ignore this timeout.
- //
- // One API call may send multiple requests to different etcd servers until it
- // succeeds. Use context of the API to specify the overall timeout.
- //
- // A HeaderTimeoutPerRequest of zero means no timeout.
- HeaderTimeoutPerRequest time.Duration
-
- // SelectionMode is an EndpointSelectionMode enum that specifies the
- // policy for choosing the etcd cluster node to which requests are sent.
- SelectionMode EndpointSelectionMode
-}
-
-func (cfg *Config) transport() CancelableTransport {
- if cfg.Transport == nil {
- return DefaultTransport
- }
- return cfg.Transport
-}
-
-func (cfg *Config) checkRedirect() CheckRedirectFunc {
- if cfg.CheckRedirect == nil {
- return DefaultCheckRedirect
- }
- return cfg.CheckRedirect
-}
-
-// CancelableTransport mimics net/http.Transport, but requires that
-// the object also support request cancellation.
-type CancelableTransport interface {
- http.RoundTripper
- CancelRequest(req *http.Request)
-}
-
-type CheckRedirectFunc func(via int) error
-
-// DefaultCheckRedirect follows up to 10 redirects, but no more.
-var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
- if via > 10 {
- return ErrTooManyRedirects
- }
- return nil
-}
-
-type Client interface {
- // Sync updates the internal cache of the etcd cluster's membership.
- Sync(context.Context) error
-
- // AutoSync periodically calls Sync() every given interval.
- // The recommended sync interval is 10 seconds to 1 minute, which does
- // not bring too much overhead to server and makes client catch up the
- // cluster change in time.
- //
- // The example to use it:
- //
- // for {
- // err := client.AutoSync(ctx, 10*time.Second)
- // if err == context.DeadlineExceeded || err == context.Canceled {
- // break
- // }
- // log.Print(err)
- // }
- AutoSync(context.Context, time.Duration) error
-
- // Endpoints returns a copy of the current set of API endpoints used
- // by Client to resolve HTTP requests. If Sync has ever been called,
- // this may differ from the initial Endpoints provided in the Config.
- Endpoints() []string
-
- // SetEndpoints sets the set of API endpoints used by Client to resolve
- // HTTP requests. If the given endpoints are not valid, an error will be
- // returned
- SetEndpoints(eps []string) error
-
- // GetVersion retrieves the current etcd server and cluster version
- GetVersion(ctx context.Context) (*version.Versions, error)
-
- httpClient
-}
-
-func New(cfg Config) (Client, error) {
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
- rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- selectionMode: cfg.SelectionMode,
- }
- if cfg.Username != "" {
- c.credentials = &credentials{
- username: cfg.Username,
- password: cfg.Password,
- }
- }
- if err := c.SetEndpoints(cfg.Endpoints); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type httpClient interface {
- Do(context.Context, httpAction) (*http.Response, []byte, error)
-}
-
-func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
- return func(ep url.URL) httpClient {
- return &redirectFollowingHTTPClient{
- checkRedirect: cr,
- client: &simpleHTTPClient{
- transport: tr,
- endpoint: ep,
- headerTimeout: headerTimeout,
- },
- }
- }
-}
-
-type credentials struct {
- username string
- password string
-}
-
-type httpClientFactory func(url.URL) httpClient
-
-type httpAction interface {
- HTTPRequest(url.URL) *http.Request
-}
-
-type httpClusterClient struct {
- clientFactory httpClientFactory
- endpoints []url.URL
- pinned int
- credentials *credentials
- sync.RWMutex
- rand *rand.Rand
- selectionMode EndpointSelectionMode
-}
-
-func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
- ceps := make([]url.URL, len(eps))
- copy(ceps, eps)
-
- // To perform a lookup on the new endpoint list without using the current
- // client, we'll copy it
- clientCopy := &httpClusterClient{
- clientFactory: c.clientFactory,
- credentials: c.credentials,
- rand: c.rand,
-
- pinned: 0,
- endpoints: ceps,
- }
-
- mAPI := NewMembersAPI(clientCopy)
- leader, err := mAPI.Leader(ctx)
- if err != nil {
- return "", err
- }
- if len(leader.ClientURLs) == 0 {
- return "", ErrNoLeaderEndpoint
- }
-
- return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
-}
-
-func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
- if len(eps) == 0 {
- return []url.URL{}, ErrNoEndpoints
- }
-
- neps := make([]url.URL, len(eps))
- for i, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- return []url.URL{}, err
- }
- neps[i] = *u
- }
- return neps, nil
-}
-
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- c.Lock()
- defer c.Unlock()
-
- c.endpoints = shuffleEndpoints(c.rand, neps)
- // We're not doing anything for PrioritizeLeader here. This is
- // due to not having a context meaning we can't call getLeaderEndpoint
- // However, if you're using PrioritizeLeader, you've already been told
- // to regularly call sync, where we do have a ctx, and can figure the
- // leader. PrioritizeLeader is also quite a loose guarantee, so deal
- // with it
- c.pinned = 0
-
- return nil
-}
-
-func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- action := act
- c.RLock()
- leps := len(c.endpoints)
- eps := make([]url.URL, leps)
- n := copy(eps, c.endpoints)
- pinned := c.pinned
-
- if c.credentials != nil {
- action = &authedAction{
- act: act,
- credentials: *c.credentials,
- }
- }
- c.RUnlock()
-
- if leps == 0 {
- return nil, nil, ErrNoEndpoints
- }
-
- if leps != n {
- return nil, nil, errors.New("unable to pick endpoint: copy failed")
- }
-
- var resp *http.Response
- var body []byte
- var err error
- cerr := &ClusterError{}
- isOneShot := ctx.Value(&oneShotCtxValue) != nil
-
- for i := pinned; i < leps+pinned; i++ {
- k := i % leps
- hc := c.clientFactory(eps[k])
- resp, body, err = hc.Do(ctx, action)
- if err != nil {
- cerr.Errors = append(cerr.Errors, err)
- if err == ctx.Err() {
- return nil, nil, ctx.Err()
- }
- if err == context.Canceled || err == context.DeadlineExceeded {
- return nil, nil, err
- }
- } else if resp.StatusCode/100 == 5 {
- switch resp.StatusCode {
- case http.StatusInternalServerError, http.StatusServiceUnavailable:
- // TODO: make sure this is a no leader response
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
- default:
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
- }
- err = cerr.Errors[0]
- }
- if err != nil {
- if !isOneShot {
- continue
- }
- c.Lock()
- c.pinned = (k + 1) % leps
- c.Unlock()
- return nil, nil, err
- }
- if k != pinned {
- c.Lock()
- c.pinned = k
- c.Unlock()
- }
- return resp, body, nil
- }
-
- return nil, nil, cerr
-}
-
-func (c *httpClusterClient) Endpoints() []string {
- c.RLock()
- defer c.RUnlock()
-
- eps := make([]string, len(c.endpoints))
- for i, ep := range c.endpoints {
- eps[i] = ep.String()
- }
-
- return eps
-}
-
-func (c *httpClusterClient) Sync(ctx context.Context) error {
- mAPI := NewMembersAPI(c)
- ms, err := mAPI.List(ctx)
- if err != nil {
- return err
- }
-
- var eps []string
- for _, m := range ms {
- eps = append(eps, m.ClientURLs...)
- }
-
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- npin := 0
-
- switch c.selectionMode {
- case EndpointSelectionRandom:
- c.RLock()
- eq := endpointsEqual(c.endpoints, neps)
- c.RUnlock()
-
- if eq {
- return nil
- }
- // When items in the endpoint list changes, we choose a new pin
- neps = shuffleEndpoints(c.rand, neps)
- case EndpointSelectionPrioritizeLeader:
- nle, err := c.getLeaderEndpoint(ctx, neps)
- if err != nil {
- return ErrNoLeaderEndpoint
- }
-
- for i, n := range neps {
- if n.String() == nle {
- npin = i
- break
- }
- }
- default:
- return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
- }
-
- c.Lock()
- defer c.Unlock()
- c.endpoints = neps
- c.pinned = npin
-
- return nil
-}
-
-func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- for {
- err := c.Sync(ctx)
- if err != nil {
- return err
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-ticker.C:
- }
- }
-}
-
-func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
- act := &getAction{Prefix: "/version"}
-
- resp, body, err := c.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- switch resp.StatusCode {
- case http.StatusOK:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- var vresp version.Versions
- if err := json.Unmarshal(body, &vresp); err != nil {
- return nil, ErrInvalidJSON
- }
- return &vresp, nil
- default:
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return nil, ErrInvalidJSON
- }
- return nil, etcdErr
- }
-}
-
-type roundTripResponse struct {
- resp *http.Response
- err error
-}
-
-type simpleHTTPClient struct {
- transport CancelableTransport
- endpoint url.URL
- headerTimeout time.Duration
-}
-
-func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- req := act.HTTPRequest(c.endpoint)
-
- if err := printcURL(req); err != nil {
- return nil, nil, err
- }
-
- isWait := false
- if req != nil && req.URL != nil {
- ws := req.URL.Query().Get("wait")
- if len(ws) != 0 {
- var err error
- isWait, err = strconv.ParseBool(ws)
- if err != nil {
- return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
- }
- }
- }
-
- var hctx context.Context
- var hcancel context.CancelFunc
- if !isWait && c.headerTimeout > 0 {
- hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
- } else {
- hctx, hcancel = context.WithCancel(ctx)
- }
- defer hcancel()
-
- reqcancel := requestCanceler(c.transport, req)
-
- rtchan := make(chan roundTripResponse, 1)
- go func() {
- resp, err := c.transport.RoundTrip(req)
- rtchan <- roundTripResponse{resp: resp, err: err}
- close(rtchan)
- }()
-
- var resp *http.Response
- var err error
-
- select {
- case rtresp := <-rtchan:
- resp, err = rtresp.resp, rtresp.err
- case <-hctx.Done():
- // cancel and wait for request to actually exit before continuing
- reqcancel()
- rtresp := <-rtchan
- resp = rtresp.resp
- switch {
- case ctx.Err() != nil:
- err = ctx.Err()
- case hctx.Err() != nil:
- err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
- default:
- panic("failed to get error from context")
- }
- }
-
- // always check for resp nil-ness to deal with possible
- // race conditions between channels above
- defer func() {
- if resp != nil {
- resp.Body.Close()
- }
- }()
-
- if err != nil {
- return nil, nil, err
- }
-
- var body []byte
- done := make(chan struct{})
- go func() {
- body, err = ioutil.ReadAll(resp.Body)
- done <- struct{}{}
- }()
-
- select {
- case <-ctx.Done():
- resp.Body.Close()
- <-done
- return nil, nil, ctx.Err()
- case <-done:
- }
-
- return resp, body, err
-}
-
-type authedAction struct {
- act httpAction
- credentials credentials
-}
-
-func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
- r := a.act.HTTPRequest(url)
- r.SetBasicAuth(a.credentials.username, a.credentials.password)
- return r
-}
-
-type redirectFollowingHTTPClient struct {
- client httpClient
- checkRedirect CheckRedirectFunc
-}
-
-func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- next := act
- for i := 0; i < 100; i++ {
- if i > 0 {
- if err := r.checkRedirect(i); err != nil {
- return nil, nil, err
- }
- }
- resp, body, err := r.client.Do(ctx, next)
- if err != nil {
- return nil, nil, err
- }
- if resp.StatusCode/100 == 3 {
- hdr := resp.Header.Get("Location")
- if hdr == "" {
- return nil, nil, fmt.Errorf("Location header not set")
- }
- loc, err := url.Parse(hdr)
- if err != nil {
- return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
- }
- next = &redirectedHTTPAction{
- action: act,
- location: *loc,
- }
- continue
- }
- return resp, body, nil
- }
-
- return nil, nil, errTooManyRedirectChecks
-}
-
-type redirectedHTTPAction struct {
- action httpAction
- location url.URL
-}
-
-func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
- orig := r.action.HTTPRequest(ep)
- orig.URL = &r.location
- return orig
-}
-
-func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
- // copied from Go 1.9<= rand.Rand.Perm
- n := len(eps)
- p := make([]int, n)
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- p[i] = p[j]
- p[j] = i
- }
- neps := make([]url.URL, n)
- for i, k := range p {
- neps[i] = eps[k]
- }
- return neps
-}
-
-func endpointsEqual(left, right []url.URL) bool {
- if len(left) != len(right) {
- return false
- }
-
- sLeft := make([]string, len(left))
- sRight := make([]string, len(right))
- for i, l := range left {
- sLeft[i] = l.String()
- }
- for i, r := range right {
- sRight[i] = r.String()
- }
-
- sort.Strings(sLeft)
- sort.Strings(sRight)
- for i := range sLeft {
- if sLeft[i] != sRight[i] {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go
deleted file mode 100644
index 34618cd..0000000
--- a/vendor/github.com/coreos/etcd/client/cluster_error.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import "fmt"
-
-type ClusterError struct {
- Errors []error
-}
-
-func (ce *ClusterError) Error() string {
- s := ErrClusterUnavailable.Error()
- for i, e := range ce.Errors {
- s += fmt.Sprintf("; error #%d: %s\n", i, e)
- }
- return s
-}
-
-func (ce *ClusterError) Detail() string {
- s := ""
- for i, e := range ce.Errors {
- s += fmt.Sprintf("error #%d: %s\n", i, e)
- }
- return s
-}
diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go
deleted file mode 100644
index c8bc9fb..0000000
--- a/vendor/github.com/coreos/etcd/client/curl.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
-)
-
-var (
- cURLDebug = false
-)
-
-func EnablecURLDebug() {
- cURLDebug = true
-}
-
-func DisablecURLDebug() {
- cURLDebug = false
-}
-
-// printcURL prints the cURL equivalent request to stderr.
-// It returns an error if the body of the request cannot
-// be read.
-// The caller MUST cancel the request if there is an error.
-func printcURL(req *http.Request) error {
- if !cURLDebug {
- return nil
- }
- var (
- command string
- b []byte
- err error
- )
-
- if req.URL != nil {
- command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
- }
-
- if req.Body != nil {
- b, err = ioutil.ReadAll(req.Body)
- if err != nil {
- return err
- }
- command += fmt.Sprintf(" -d %q", string(b))
- }
-
- fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
-
- // reset body
- body := bytes.NewBuffer(b)
- req.Body = ioutil.NopCloser(body)
-
- return nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go
deleted file mode 100644
index 442e35f..0000000
--- a/vendor/github.com/coreos/etcd/client/discover.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "github.com/coreos/etcd/pkg/srv"
-)
-
-// Discoverer is an interface that wraps the Discover method.
-type Discoverer interface {
- // Discover looks up the etcd servers for the domain.
- Discover(domain string) ([]string, error)
-}
-
-type srvDiscover struct{}
-
-// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
-func NewSRVDiscover() Discoverer {
- return &srvDiscover{}
-}
-
-func (d *srvDiscover) Discover(domain string) ([]string, error) {
- srvs, err := srv.GetClient("etcd-client", domain)
- if err != nil {
- return nil, err
- }
- return srvs.Endpoints, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go
deleted file mode 100644
index ad4eca4..0000000
--- a/vendor/github.com/coreos/etcd/client/doc.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package client provides bindings for the etcd APIs.
-
-Create a Config and exchange it for a Client:
-
- import (
- "net/http"
- "context"
-
- "github.com/coreos/etcd/client"
- )
-
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: DefaultTransport,
- }
-
- c, err := client.New(cfg)
- if err != nil {
- // handle error
- }
-
-Clients are safe for concurrent use by multiple goroutines.
-
-Create a KeysAPI using the Client, then use it to interact with etcd:
-
- kAPI := client.NewKeysAPI(c)
-
- // create a new key /foo with the value "bar"
- _, err = kAPI.Create(context.Background(), "/foo", "bar")
- if err != nil {
- // handle error
- }
-
- // delete the newly created key only if the value is still "bar"
- _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
- if err != nil {
- // handle error
- }
-
-Use a custom context to set timeouts on your operations:
-
- import "time"
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- // set a new key, ignoring its previous state
- _, err := kAPI.Set(ctx, "/ping", "pong", nil)
- if err != nil {
- if err == context.DeadlineExceeded {
- // request took longer than 5s
- } else {
- // handle error
- }
- }
-
-*/
-package client
diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go
deleted file mode 100644
index 237fdbe..0000000
--- a/vendor/github.com/coreos/etcd/client/keys.generated.go
+++ /dev/null
@@ -1,5218 +0,0 @@
-// ************************************************************
-// DO NOT EDIT.
-// THIS FILE IS AUTO-GENERATED BY codecgen.
-// ************************************************************
-
-package client
-
-import (
- "errors"
- "fmt"
- "reflect"
- "runtime"
- time "time"
-
- codec1978 "github.com/ugorji/go/codec"
-)
-
-const (
- // ----- content types ----
- codecSelferC_UTF87612 = 1
- codecSelferC_RAW7612 = 0
- // ----- value types used ----
- codecSelferValueTypeArray7612 = 10
- codecSelferValueTypeMap7612 = 9
- // ----- containerStateValues ----
- codecSelfer_containerMapKey7612 = 2
- codecSelfer_containerMapValue7612 = 3
- codecSelfer_containerMapEnd7612 = 4
- codecSelfer_containerArrayElem7612 = 6
- codecSelfer_containerArrayEnd7612 = 7
-)
-
-var (
- codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits())
- codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`)
-)
-
-type codecSelfer7612 struct{}
-
-func init() {
- if codec1978.GenVersion != 8 {
- _, file, _, _ := runtime.Caller(0)
- err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
- 8, codec1978.GenVersion, file)
- panic(err)
- }
- if false { // reference the types, but skip this branch at build/run time
- var v0 time.Duration
- _ = v0
- }
-}
-
-func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeInt(int64(x.Code))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("errorCode"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeInt(int64(x.Code))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Message))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("message"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Message))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Cause))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("cause"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Cause))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeUint(uint64(x.Index))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("index"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeUint(uint64(x.Index))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "errorCode":
- if r.TryDecodeAsNil() {
- x.Code = 0
- } else {
- yyv4 := &x.Code
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612))
- }
- }
- case "message":
- if r.TryDecodeAsNil() {
- x.Message = ""
- } else {
- yyv6 := &x.Message
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "cause":
- if r.TryDecodeAsNil() {
- x.Cause = ""
- } else {
- yyv8 := &x.Cause
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "index":
- if r.TryDecodeAsNil() {
- x.Index = 0
- } else {
- yyv10 := &x.Index
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*uint64)(yyv10)) = uint64(r.DecodeUint(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Code = 0
- } else {
- yyv13 := &x.Code
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612))
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Message = ""
- } else {
- yyv15 := &x.Message
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Cause = ""
- } else {
- yyv17 := &x.Cause
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Index = 0
- } else {
- yyv19 := &x.Index
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*uint64)(yyv19)) = uint64(r.DecodeUint(64))
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x))
- }
-}
-
-func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- *((*string)(x)) = r.DecodeString()
- }
-}
-
-func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(2)
- } else {
- r.WriteMapStart(2)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeUint(uint64(x.AfterIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("AfterIndex"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeUint(uint64(x.AfterIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "AfterIndex":
- if r.TryDecodeAsNil() {
- x.AfterIndex = 0
- } else {
- yyv4 := &x.AfterIndex
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*uint64)(yyv4)) = uint64(r.DecodeUint(64))
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv6 := &x.Recursive
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*bool)(yyv6)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj8 int
- var yyb8 bool
- var yyhl8 bool = l >= 0
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.AfterIndex = 0
- } else {
- yyv9 := &x.AfterIndex
- yym10 := z.DecBinary()
- _ = yym10
- if false {
- } else {
- *((*uint64)(yyv9)) = uint64(r.DecodeUint(64))
- }
- }
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv11 := &x.Recursive
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*bool)(yyv11)) = r.DecodeBool()
- }
- }
- for {
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj8-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(1)
- } else {
- r.WriteMapStart(1)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv4 := &x.TTL
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv4) {
- } else {
- *((*int64)(yyv4)) = int64(r.DecodeInt(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj6 int
- var yyb6 bool
- var yyhl6 bool = l >= 0
- yyj6++
- if yyhl6 {
- yyb6 = yyj6 > l
- } else {
- yyb6 = r.CheckBreak()
- }
- if yyb6 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv7 := &x.TTL
- yym8 := z.DecBinary()
- _ = yym8
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv7) {
- } else {
- *((*int64)(yyv7)) = int64(r.DecodeInt(64))
- }
- }
- for {
- yyj6++
- if yyhl6 {
- yyb6 = yyj6 > l
- } else {
- yyb6 = r.CheckBreak()
- }
- if yyb6 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj6-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(7)
- } else {
- r.WriteMapStart(7)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- x.PrevExist.CodecEncodeSelf(e)
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevExist"))
- r.WriteMapElemValue()
- x.PrevExist.CodecEncodeSelf(e)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Refresh"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym19 := z.EncBinary()
- _ = yym19
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym20 := z.EncBinary()
- _ = yym20
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym22 := z.EncBinary()
- _ = yym22
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess"))
- r.WriteMapElemValue()
- yym23 := z.EncBinary()
- _ = yym23
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv4 := &x.PrevValue
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv6 := &x.PrevIndex
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*uint64)(yyv6)) = uint64(r.DecodeUint(64))
- }
- }
- case "PrevExist":
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv8 := &x.PrevExist
- yyv8.CodecDecodeSelf(d)
- }
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv9 := &x.TTL
- yym10 := z.DecBinary()
- _ = yym10
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv9) {
- } else {
- *((*int64)(yyv9)) = int64(r.DecodeInt(64))
- }
- }
- case "Refresh":
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv11 := &x.Refresh
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*bool)(yyv11)) = r.DecodeBool()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv13 := &x.Dir
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*bool)(yyv13)) = r.DecodeBool()
- }
- }
- case "NoValueOnSuccess":
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv15 := &x.NoValueOnSuccess
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*bool)(yyv15)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj17 int
- var yyb17 bool
- var yyhl17 bool = l >= 0
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv18 := &x.PrevValue
- yym19 := z.DecBinary()
- _ = yym19
- if false {
- } else {
- *((*string)(yyv18)) = r.DecodeString()
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv20 := &x.PrevIndex
- yym21 := z.DecBinary()
- _ = yym21
- if false {
- } else {
- *((*uint64)(yyv20)) = uint64(r.DecodeUint(64))
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv22 := &x.PrevExist
- yyv22.CodecDecodeSelf(d)
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv23 := &x.TTL
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv23) {
- } else {
- *((*int64)(yyv23)) = int64(r.DecodeInt(64))
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv25 := &x.Refresh
- yym26 := z.DecBinary()
- _ = yym26
- if false {
- } else {
- *((*bool)(yyv25)) = r.DecodeBool()
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv27 := &x.Dir
- yym28 := z.DecBinary()
- _ = yym28
- if false {
- } else {
- *((*bool)(yyv27)) = r.DecodeBool()
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv29 := &x.NoValueOnSuccess
- yym30 := z.DecBinary()
- _ = yym30
- if false {
- } else {
- *((*bool)(yyv29)) = r.DecodeBool()
- }
- }
- for {
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj17-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(3)
- } else {
- r.WriteMapStart(3)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeBool(bool(x.Sort))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Sort"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeBool(bool(x.Sort))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Quorum"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv4 := &x.Recursive
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*bool)(yyv4)) = r.DecodeBool()
- }
- }
- case "Sort":
- if r.TryDecodeAsNil() {
- x.Sort = false
- } else {
- yyv6 := &x.Sort
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*bool)(yyv6)) = r.DecodeBool()
- }
- }
- case "Quorum":
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv8 := &x.Quorum
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*bool)(yyv8)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj10 int
- var yyb10 bool
- var yyhl10 bool = l >= 0
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv11 := &x.Recursive
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*bool)(yyv11)) = r.DecodeBool()
- }
- }
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Sort = false
- } else {
- yyv13 := &x.Sort
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*bool)(yyv13)) = r.DecodeBool()
- }
- }
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv15 := &x.Quorum
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*bool)(yyv15)) = r.DecodeBool()
- }
- }
- for {
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj10-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv4 := &x.PrevValue
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv6 := &x.PrevIndex
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*uint64)(yyv6)) = uint64(r.DecodeUint(64))
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv8 := &x.Recursive
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*bool)(yyv8)) = r.DecodeBool()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv10 := &x.Dir
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*bool)(yyv10)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv13 := &x.PrevValue
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*string)(yyv13)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv15 := &x.PrevIndex
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*uint64)(yyv15)) = uint64(r.DecodeUint(64))
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv17 := &x.Recursive
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*bool)(yyv17)) = r.DecodeBool()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv19 := &x.Dir
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(3)
- } else {
- r.WriteMapStart(3)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Action))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("action"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Action))
- }
- }
- var yyn6 bool
- if x.Node == nil {
- yyn6 = true
- goto LABEL6
- }
- LABEL6:
- if yyr2 || yy2arr2 {
- if yyn6 {
- r.WriteArrayElem()
- r.EncodeNil()
- } else {
- r.WriteArrayElem()
- if x.Node == nil {
- r.EncodeNil()
- } else {
- x.Node.CodecEncodeSelf(e)
- }
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("node"))
- r.WriteMapElemValue()
- if yyn6 {
- r.EncodeNil()
- } else {
- if x.Node == nil {
- r.EncodeNil()
- } else {
- x.Node.CodecEncodeSelf(e)
- }
- }
- }
- var yyn9 bool
- if x.PrevNode == nil {
- yyn9 = true
- goto LABEL9
- }
- LABEL9:
- if yyr2 || yy2arr2 {
- if yyn9 {
- r.WriteArrayElem()
- r.EncodeNil()
- } else {
- r.WriteArrayElem()
- if x.PrevNode == nil {
- r.EncodeNil()
- } else {
- x.PrevNode.CodecEncodeSelf(e)
- }
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("prevNode"))
- r.WriteMapElemValue()
- if yyn9 {
- r.EncodeNil()
- } else {
- if x.PrevNode == nil {
- r.EncodeNil()
- } else {
- x.PrevNode.CodecEncodeSelf(e)
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "action":
- if r.TryDecodeAsNil() {
- x.Action = ""
- } else {
- yyv4 := &x.Action
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "node":
- if x.Node == nil {
- x.Node = new(Node)
- }
- if r.TryDecodeAsNil() {
- if x.Node != nil {
- x.Node = nil
- }
- } else {
- if x.Node == nil {
- x.Node = new(Node)
- }
- x.Node.CodecDecodeSelf(d)
- }
- case "prevNode":
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- if r.TryDecodeAsNil() {
- if x.PrevNode != nil {
- x.PrevNode = nil
- }
- } else {
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- x.PrevNode.CodecDecodeSelf(d)
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj8 int
- var yyb8 bool
- var yyhl8 bool = l >= 0
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Action = ""
- } else {
- yyv9 := &x.Action
- yym10 := z.DecBinary()
- _ = yym10
- if false {
- } else {
- *((*string)(yyv9)) = r.DecodeString()
- }
- }
- if x.Node == nil {
- x.Node = new(Node)
- }
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- if x.Node != nil {
- x.Node = nil
- }
- } else {
- if x.Node == nil {
- x.Node = new(Node)
- }
- x.Node.CodecDecodeSelf(d)
- }
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- if x.PrevNode != nil {
- x.PrevNode = nil
- }
- } else {
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- x.PrevNode.CodecDecodeSelf(d)
- }
- for {
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj8-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- var yyq2 [8]bool
- _ = yyq2
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- yyq2[1] = x.Dir != false
- yyq2[6] = x.Expiration != nil
- yyq2[7] = x.TTL != 0
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(8)
- } else {
- var yynn2 = 5
- for _, b := range yyq2 {
- if b {
- yynn2++
- }
- }
- r.WriteMapStart(yynn2)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("key"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- if yyq2[1] {
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.EncodeBool(false)
- }
- } else {
- if yyq2[1] {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("dir"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("value"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- if x.Nodes == nil {
- r.EncodeNil()
- } else {
- x.Nodes.CodecEncodeSelf(e)
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("nodes"))
- r.WriteMapElemValue()
- if x.Nodes == nil {
- r.EncodeNil()
- } else {
- x.Nodes.CodecEncodeSelf(e)
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeUint(uint64(x.CreatedIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("createdIndex"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeUint(uint64(x.CreatedIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym19 := z.EncBinary()
- _ = yym19
- if false {
- } else {
- r.EncodeUint(uint64(x.ModifiedIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex"))
- r.WriteMapElemValue()
- yym20 := z.EncBinary()
- _ = yym20
- if false {
- } else {
- r.EncodeUint(uint64(x.ModifiedIndex))
- }
- }
- var yyn21 bool
- if x.Expiration == nil {
- yyn21 = true
- goto LABEL21
- }
- LABEL21:
- if yyr2 || yy2arr2 {
- if yyn21 {
- r.WriteArrayElem()
- r.EncodeNil()
- } else {
- r.WriteArrayElem()
- if yyq2[6] {
- if x.Expiration == nil {
- r.EncodeNil()
- } else {
- yym22 := z.EncBinary()
- _ = yym22
- if false {
- } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 {
- r.EncodeBuiltin(yym23, x.Expiration)
- } else if z.HasExtensions() && z.EncExt(x.Expiration) {
- } else if yym22 {
- z.EncBinaryMarshal(x.Expiration)
- } else if !yym22 && z.IsJSONHandle() {
- z.EncJSONMarshal(x.Expiration)
- } else {
- z.EncFallback(x.Expiration)
- }
- }
- } else {
- r.EncodeNil()
- }
- }
- } else {
- if yyq2[6] {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("expiration"))
- r.WriteMapElemValue()
- if yyn21 {
- r.EncodeNil()
- } else {
- if x.Expiration == nil {
- r.EncodeNil()
- } else {
- yym24 := z.EncBinary()
- _ = yym24
- if false {
- } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 {
- r.EncodeBuiltin(yym25, x.Expiration)
- } else if z.HasExtensions() && z.EncExt(x.Expiration) {
- } else if yym24 {
- z.EncBinaryMarshal(x.Expiration)
- } else if !yym24 && z.IsJSONHandle() {
- z.EncJSONMarshal(x.Expiration)
- } else {
- z.EncFallback(x.Expiration)
- }
- }
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- if yyq2[7] {
- yym27 := z.EncBinary()
- _ = yym27
- if false {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.EncodeInt(0)
- }
- } else {
- if yyq2[7] {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("ttl"))
- r.WriteMapElemValue()
- yym28 := z.EncBinary()
- _ = yym28
- if false {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv4 := &x.Key
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv6 := &x.Dir
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*bool)(yyv6)) = r.DecodeBool()
- }
- }
- case "value":
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv8 := &x.Value
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "nodes":
- if r.TryDecodeAsNil() {
- x.Nodes = nil
- } else {
- yyv10 := &x.Nodes
- yyv10.CodecDecodeSelf(d)
- }
- case "createdIndex":
- if r.TryDecodeAsNil() {
- x.CreatedIndex = 0
- } else {
- yyv11 := &x.CreatedIndex
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*uint64)(yyv11)) = uint64(r.DecodeUint(64))
- }
- }
- case "modifiedIndex":
- if r.TryDecodeAsNil() {
- x.ModifiedIndex = 0
- } else {
- yyv13 := &x.ModifiedIndex
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*uint64)(yyv13)) = uint64(r.DecodeUint(64))
- }
- }
- case "expiration":
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- if r.TryDecodeAsNil() {
- if x.Expiration != nil {
- x.Expiration = nil
- }
- } else {
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 {
- r.DecodeBuiltin(yym17, x.Expiration)
- } else if z.HasExtensions() && z.DecExt(x.Expiration) {
- } else if yym16 {
- z.DecBinaryUnmarshal(x.Expiration)
- } else if !yym16 && z.IsJSONHandle() {
- z.DecJSONUnmarshal(x.Expiration)
- } else {
- z.DecFallback(x.Expiration, false)
- }
- }
- case "ttl":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv18 := &x.TTL
- yym19 := z.DecBinary()
- _ = yym19
- if false {
- } else {
- *((*int64)(yyv18)) = int64(r.DecodeInt(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj20 int
- var yyb20 bool
- var yyhl20 bool = l >= 0
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv21 := &x.Key
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*string)(yyv21)) = r.DecodeString()
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv23 := &x.Dir
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else {
- *((*bool)(yyv23)) = r.DecodeBool()
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv25 := &x.Value
- yym26 := z.DecBinary()
- _ = yym26
- if false {
- } else {
- *((*string)(yyv25)) = r.DecodeString()
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Nodes = nil
- } else {
- yyv27 := &x.Nodes
- yyv27.CodecDecodeSelf(d)
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.CreatedIndex = 0
- } else {
- yyv28 := &x.CreatedIndex
- yym29 := z.DecBinary()
- _ = yym29
- if false {
- } else {
- *((*uint64)(yyv28)) = uint64(r.DecodeUint(64))
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.ModifiedIndex = 0
- } else {
- yyv30 := &x.ModifiedIndex
- yym31 := z.DecBinary()
- _ = yym31
- if false {
- } else {
- *((*uint64)(yyv30)) = uint64(r.DecodeUint(64))
- }
- }
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- if x.Expiration != nil {
- x.Expiration = nil
- }
- } else {
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- yym33 := z.DecBinary()
- _ = yym33
- if false {
- } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 {
- r.DecodeBuiltin(yym34, x.Expiration)
- } else if z.HasExtensions() && z.DecExt(x.Expiration) {
- } else if yym33 {
- z.DecBinaryUnmarshal(x.Expiration)
- } else if !yym33 && z.IsJSONHandle() {
- z.DecJSONUnmarshal(x.Expiration)
- } else {
- z.DecFallback(x.Expiration, false)
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv35 := &x.TTL
- yym36 := z.DecBinary()
- _ = yym36
- if false {
- } else {
- *((*int64)(yyv35)) = int64(r.DecodeInt(64))
- }
- }
- for {
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj20-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- h.encNodes((Nodes)(x), e)
- }
- }
-}
-
-func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- h.decNodes((*Nodes)(x), d)
- }
-}
-
-func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(0)
- } else {
- r.WriteMapStart(0)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj4 int
- var yyb4 bool
- var yyhl4 bool = l >= 0
- for {
- yyj4++
- if yyhl4 {
- yyb4 = yyj4 > l
- } else {
- yyb4 = r.CheckBreak()
- }
- if yyb4 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj4-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(0)
- } else {
- r.WriteMapStart(0)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj4 int
- var yyb4 bool
- var yyhl4 bool = l >= 0
- for {
- yyj4++
- if yyhl4 {
- yyb4 = yyj4 > l
- } else {
- yyb4 = r.CheckBreak()
- }
- if yyb4 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj4-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(5)
- } else {
- r.WriteMapStart(5)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeBool(bool(x.Sorted))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Sorted"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeBool(bool(x.Sorted))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Quorum"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv8 := &x.Recursive
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*bool)(yyv8)) = r.DecodeBool()
- }
- }
- case "Sorted":
- if r.TryDecodeAsNil() {
- x.Sorted = false
- } else {
- yyv10 := &x.Sorted
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*bool)(yyv10)) = r.DecodeBool()
- }
- }
- case "Quorum":
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv12 := &x.Quorum
- yym13 := z.DecBinary()
- _ = yym13
- if false {
- } else {
- *((*bool)(yyv12)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj14 int
- var yyb14 bool
- var yyhl14 bool = l >= 0
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv15 := &x.Prefix
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv17 := &x.Key
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv19 := &x.Recursive
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Sorted = false
- } else {
- yyv21 := &x.Sorted
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*bool)(yyv21)) = r.DecodeBool()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv23 := &x.Quorum
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else {
- *((*bool)(yyv23)) = r.DecodeBool()
- }
- }
- for {
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj14-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeUint(uint64(x.WaitIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("WaitIndex"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeUint(uint64(x.WaitIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "WaitIndex":
- if r.TryDecodeAsNil() {
- x.WaitIndex = 0
- } else {
- yyv8 := &x.WaitIndex
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*uint64)(yyv8)) = uint64(r.DecodeUint(64))
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv10 := &x.Recursive
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*bool)(yyv10)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv13 := &x.Prefix
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*string)(yyv13)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv15 := &x.Key
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.WaitIndex = 0
- } else {
- yyv17 := &x.WaitIndex
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*uint64)(yyv17)) = uint64(r.DecodeUint(64))
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv19 := &x.Recursive
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(10)
- } else {
- r.WriteMapStart(10)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Value"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- x.PrevExist.CodecEncodeSelf(e)
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevExist"))
- r.WriteMapElemValue()
- x.PrevExist.CodecEncodeSelf(e)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym22 := z.EncBinary()
- _ = yym22
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym23 := z.EncBinary()
- _ = yym23
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym25 := z.EncBinary()
- _ = yym25
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Refresh"))
- r.WriteMapElemValue()
- yym26 := z.EncBinary()
- _ = yym26
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym28 := z.EncBinary()
- _ = yym28
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym29 := z.EncBinary()
- _ = yym29
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym31 := z.EncBinary()
- _ = yym31
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess"))
- r.WriteMapElemValue()
- yym32 := z.EncBinary()
- _ = yym32
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "Value":
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv8 := &x.Value
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv10 := &x.PrevValue
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*string)(yyv10)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv12 := &x.PrevIndex
- yym13 := z.DecBinary()
- _ = yym13
- if false {
- } else {
- *((*uint64)(yyv12)) = uint64(r.DecodeUint(64))
- }
- }
- case "PrevExist":
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv14 := &x.PrevExist
- yyv14.CodecDecodeSelf(d)
- }
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv15 := &x.TTL
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv15) {
- } else {
- *((*int64)(yyv15)) = int64(r.DecodeInt(64))
- }
- }
- case "Refresh":
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv17 := &x.Refresh
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*bool)(yyv17)) = r.DecodeBool()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv19 := &x.Dir
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- case "NoValueOnSuccess":
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv21 := &x.NoValueOnSuccess
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*bool)(yyv21)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj23 int
- var yyb23 bool
- var yyhl23 bool = l >= 0
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv24 := &x.Prefix
- yym25 := z.DecBinary()
- _ = yym25
- if false {
- } else {
- *((*string)(yyv24)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv26 := &x.Key
- yym27 := z.DecBinary()
- _ = yym27
- if false {
- } else {
- *((*string)(yyv26)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv28 := &x.Value
- yym29 := z.DecBinary()
- _ = yym29
- if false {
- } else {
- *((*string)(yyv28)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv30 := &x.PrevValue
- yym31 := z.DecBinary()
- _ = yym31
- if false {
- } else {
- *((*string)(yyv30)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv32 := &x.PrevIndex
- yym33 := z.DecBinary()
- _ = yym33
- if false {
- } else {
- *((*uint64)(yyv32)) = uint64(r.DecodeUint(64))
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv34 := &x.PrevExist
- yyv34.CodecDecodeSelf(d)
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv35 := &x.TTL
- yym36 := z.DecBinary()
- _ = yym36
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv35) {
- } else {
- *((*int64)(yyv35)) = int64(r.DecodeInt(64))
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv37 := &x.Refresh
- yym38 := z.DecBinary()
- _ = yym38
- if false {
- } else {
- *((*bool)(yyv37)) = r.DecodeBool()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv39 := &x.Dir
- yym40 := z.DecBinary()
- _ = yym40
- if false {
- } else {
- *((*bool)(yyv39)) = r.DecodeBool()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv41 := &x.NoValueOnSuccess
- yym42 := z.DecBinary()
- _ = yym42
- if false {
- } else {
- *((*bool)(yyv41)) = r.DecodeBool()
- }
- }
- for {
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj23-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(6)
- } else {
- r.WriteMapStart(6)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym19 := z.EncBinary()
- _ = yym19
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym20 := z.EncBinary()
- _ = yym20
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv8 := &x.PrevValue
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv10 := &x.PrevIndex
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*uint64)(yyv10)) = uint64(r.DecodeUint(64))
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv12 := &x.Dir
- yym13 := z.DecBinary()
- _ = yym13
- if false {
- } else {
- *((*bool)(yyv12)) = r.DecodeBool()
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv14 := &x.Recursive
- yym15 := z.DecBinary()
- _ = yym15
- if false {
- } else {
- *((*bool)(yyv14)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj16 int
- var yyb16 bool
- var yyhl16 bool = l >= 0
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv17 := &x.Prefix
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv19 := &x.Key
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*string)(yyv19)) = r.DecodeString()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv21 := &x.PrevValue
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*string)(yyv21)) = r.DecodeString()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv23 := &x.PrevIndex
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else {
- *((*uint64)(yyv23)) = uint64(r.DecodeUint(64))
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv25 := &x.Dir
- yym26 := z.DecBinary()
- _ = yym26
- if false {
- } else {
- *((*bool)(yyv25)) = r.DecodeBool()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv27 := &x.Recursive
- yym28 := z.DecBinary()
- _ = yym28
- if false {
- } else {
- *((*bool)(yyv27)) = r.DecodeBool()
- }
- }
- for {
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj16-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Value"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = ""
- } else {
- yyv6 := &x.Dir
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "Value":
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv8 := &x.Value
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv10 := &x.TTL
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv10) {
- } else {
- *((*int64)(yyv10)) = int64(r.DecodeInt(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv13 := &x.Prefix
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*string)(yyv13)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = ""
- } else {
- yyv15 := &x.Dir
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv17 := &x.Value
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv19 := &x.TTL
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv19) {
- } else {
- *((*int64)(yyv19)) = int64(r.DecodeInt(64))
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- r.WriteArrayStart(len(v))
- for _, yyv1 := range v {
- r.WriteArrayElem()
- if yyv1 == nil {
- r.EncodeNil()
- } else {
- yyv1.CodecEncodeSelf(e)
- }
- }
- r.WriteArrayEnd()
-}
-
-func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
-
- yyv1 := *v
- yyh1, yyl1 := z.DecSliceHelperStart()
- var yyc1 bool
- _ = yyc1
- if yyl1 == 0 {
- if yyv1 == nil {
- yyv1 = []*Node{}
- yyc1 = true
- } else if len(yyv1) != 0 {
- yyv1 = yyv1[:0]
- yyc1 = true
- }
- } else {
- yyhl1 := yyl1 > 0
- var yyrl1 int
- _ = yyrl1
- if yyhl1 {
- if yyl1 > cap(yyv1) {
- yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
- if yyrl1 <= cap(yyv1) {
- yyv1 = yyv1[:yyrl1]
- } else {
- yyv1 = make([]*Node, yyrl1)
- }
- yyc1 = true
- } else if yyl1 != len(yyv1) {
- yyv1 = yyv1[:yyl1]
- yyc1 = true
- }
- }
- var yyj1 int
- // var yydn1 bool
- for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ {
- if yyj1 == 0 && len(yyv1) == 0 {
- if yyhl1 {
- yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
- } else {
- yyrl1 = 8
- }
- yyv1 = make([]*Node, yyrl1)
- yyc1 = true
- }
- yyh1.ElemContainerState(yyj1)
- // yydn1 = r.TryDecodeAsNil()
-
- // if indefinite, etc, then expand the slice if necessary
- var yydb1 bool
- if yyj1 >= len(yyv1) {
- yyv1 = append(yyv1, nil)
- yyc1 = true
-
- }
- if yydb1 {
- z.DecSwallow()
- } else {
- if r.TryDecodeAsNil() {
- if yyv1[yyj1] != nil {
- *yyv1[yyj1] = Node{}
- }
- } else {
- if yyv1[yyj1] == nil {
- yyv1[yyj1] = new(Node)
- }
- yyw2 := yyv1[yyj1]
- yyw2.CodecDecodeSelf(d)
- }
-
- }
-
- }
- if yyj1 < len(yyv1) {
- yyv1 = yyv1[:yyj1]
- yyc1 = true
- } else if yyj1 == 0 && yyv1 == nil {
- yyv1 = make([]*Node, 0)
- yyc1 = true
- }
- }
- yyh1.End()
- if yyc1 {
- *v = yyv1
- }
-
-}
diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go
deleted file mode 100644
index 8b9fd3f..0000000
--- a/vendor/github.com/coreos/etcd/client/keys.go
+++ /dev/null
@@ -1,681 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/pathutil"
- "github.com/ugorji/go/codec"
-)
-
-const (
- ErrorCodeKeyNotFound = 100
- ErrorCodeTestFailed = 101
- ErrorCodeNotFile = 102
- ErrorCodeNotDir = 104
- ErrorCodeNodeExist = 105
- ErrorCodeRootROnly = 107
- ErrorCodeDirNotEmpty = 108
- ErrorCodeUnauthorized = 110
-
- ErrorCodePrevValueRequired = 201
- ErrorCodeTTLNaN = 202
- ErrorCodeIndexNaN = 203
- ErrorCodeInvalidField = 209
- ErrorCodeInvalidForm = 210
-
- ErrorCodeRaftInternal = 300
- ErrorCodeLeaderElect = 301
-
- ErrorCodeWatcherCleared = 400
- ErrorCodeEventIndexCleared = 401
-)
-
-type Error struct {
- Code int `json:"errorCode"`
- Message string `json:"message"`
- Cause string `json:"cause"`
- Index uint64 `json:"index"`
-}
-
-func (e Error) Error() string {
- return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
-}
-
-var (
- ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
- ErrEmptyBody = errors.New("client: response body is empty")
-)
-
-// PrevExistType is used to define an existence condition when setting
-// or deleting Nodes.
-type PrevExistType string
-
-const (
- PrevIgnore = PrevExistType("")
- PrevExist = PrevExistType("true")
- PrevNoExist = PrevExistType("false")
-)
-
-var (
- defaultV2KeysPrefix = "/v2/keys"
-)
-
-// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
-// API over HTTP.
-func NewKeysAPI(c Client) KeysAPI {
- return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
-}
-
-// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
-// to provide a custom base URL path. This should only be used in
-// very rare cases.
-func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
- return &httpKeysAPI{
- client: c,
- prefix: p,
- }
-}
-
-type KeysAPI interface {
- // Get retrieves a set of Nodes from etcd
- Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
-
- // Set assigns a new value to a Node identified by a given key. The caller
- // may define a set of conditions in the SetOptions. If SetOptions.Dir=true
- // then value is ignored.
- Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
-
- // Delete removes a Node identified by the given key, optionally destroying
- // all of its children as well. The caller may define a set of required
- // conditions in a DeleteOptions object.
- Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
-
- // Create is an alias for Set w/ PrevExist=false
- Create(ctx context.Context, key, value string) (*Response, error)
-
- // CreateInOrder is used to atomically create in-order keys within the given directory.
- CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
-
- // Update is an alias for Set w/ PrevExist=true
- Update(ctx context.Context, key, value string) (*Response, error)
-
- // Watcher builds a new Watcher targeted at a specific Node identified
- // by the given key. The Watcher may be configured at creation time
- // through a WatcherOptions object. The returned Watcher is designed
- // to emit events that happen to a Node, and optionally to its children.
- Watcher(key string, opts *WatcherOptions) Watcher
-}
-
-type WatcherOptions struct {
- // AfterIndex defines the index after-which the Watcher should
- // start emitting events. For example, if a value of 5 is
- // provided, the first event will have an index >= 6.
- //
- // Setting AfterIndex to 0 (default) means that the Watcher
- // should start watching for events starting at the current
- // index, whatever that may be.
- AfterIndex uint64
-
- // Recursive specifies whether or not the Watcher should emit
- // events that occur in children of the given keyspace. If set
- // to false (default), events will be limited to those that
- // occur for the exact key.
- Recursive bool
-}
-
-type CreateInOrderOptions struct {
- // TTL defines a period of time after-which the Node should
- // expire and no longer exist. Values <= 0 are ignored. Given
- // that the zero-value is ignored, TTL cannot be used to set
- // a TTL of 0.
- TTL time.Duration
-}
-
-type SetOptions struct {
- // PrevValue specifies what the current value of the Node must
- // be in order for the Set operation to succeed.
- //
- // Leaving this field empty means that the caller wishes to
- // ignore the current value of the Node. This cannot be used
- // to compare the Node's current value to an empty string.
- //
- // PrevValue is ignored if Dir=true
- PrevValue string
-
- // PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Set operation to succeed.
- //
- // If PrevIndex is set to 0 (default), no comparison is made.
- PrevIndex uint64
-
- // PrevExist specifies whether the Node must currently exist
- // (PrevExist) or not (PrevNoExist). If the caller does not
- // care about existence, set PrevExist to PrevIgnore, or simply
- // leave it unset.
- PrevExist PrevExistType
-
- // TTL defines a period of time after-which the Node should
- // expire and no longer exist. Values <= 0 are ignored. Given
- // that the zero-value is ignored, TTL cannot be used to set
- // a TTL of 0.
- TTL time.Duration
-
- // Refresh set to true means a TTL value can be updated
- // without firing a watch or changing the node value. A
- // value must not be provided when refreshing a key.
- Refresh bool
-
- // Dir specifies whether or not this Node should be created as a directory.
- Dir bool
-
- // NoValueOnSuccess specifies whether the response contains the current value of the Node.
- // If set, the response will only contain the current value when the request fails.
- NoValueOnSuccess bool
-}
-
-type GetOptions struct {
- // Recursive defines whether or not all children of the Node
- // should be returned.
- Recursive bool
-
- // Sort instructs the server whether or not to sort the Nodes.
- // If true, the Nodes are sorted alphabetically by key in
- // ascending order (A to z). If false (default), the Nodes will
- // not be sorted and the ordering used should not be considered
- // predictable.
- Sort bool
-
- // Quorum specifies whether to get the latest committed value that
- // has been applied in a quorum of members, which ensures external
- // consistency (or linearizability).
- Quorum bool
-}
-
-type DeleteOptions struct {
- // PrevValue specifies what the current value of the Node must
- // be in order for the Delete operation to succeed.
- //
- // Leaving this field empty means that the caller wishes to
- // ignore the current value of the Node. This cannot be used
- // to compare the Node's current value to an empty string.
- PrevValue string
-
- // PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Delete operation to succeed.
- //
- // If PrevIndex is set to 0 (default), no comparison is made.
- PrevIndex uint64
-
- // Recursive defines whether or not all children of the Node
- // should be deleted. If set to true, all children of the Node
- // identified by the given key will be deleted. If left unset
- // or explicitly set to false, only a single Node will be
- // deleted.
- Recursive bool
-
- // Dir specifies whether or not this Node should be removed as a directory.
- Dir bool
-}
-
-type Watcher interface {
- // Next blocks until an etcd event occurs, then returns a Response
- // representing that event. The behavior of Next depends on the
- // WatcherOptions used to construct the Watcher. Next is designed to
- // be called repeatedly, each time blocking until a subsequent event
- // is available.
- //
- // If the provided context is cancelled, Next will return a non-nil
- // error. Any other failures encountered while waiting for the next
- // event (connection issues, deserialization failures, etc) will
- // also result in a non-nil error.
- Next(context.Context) (*Response, error)
-}
-
-type Response struct {
- // Action is the name of the operation that occurred. Possible values
- // include get, set, delete, update, create, compareAndSwap,
- // compareAndDelete and expire.
- Action string `json:"action"`
-
- // Node represents the state of the relevant etcd Node.
- Node *Node `json:"node"`
-
- // PrevNode represents the previous state of the Node. PrevNode is non-nil
- // only if the Node existed before the action occurred and the action
- // caused a change to the Node.
- PrevNode *Node `json:"prevNode"`
-
- // Index holds the cluster-level index at the time the Response was generated.
- // This index is not tied to the Node(s) contained in this Response.
- Index uint64 `json:"-"`
-
- // ClusterID holds the cluster-level ID reported by the server. This
- // should be different for different etcd clusters.
- ClusterID string `json:"-"`
-}
-
-type Node struct {
- // Key represents the unique location of this Node (e.g. "/foo/bar").
- Key string `json:"key"`
-
- // Dir reports whether node describes a directory.
- Dir bool `json:"dir,omitempty"`
-
- // Value is the current data stored on this Node. If this Node
- // is a directory, Value will be empty.
- Value string `json:"value"`
-
- // Nodes holds the children of this Node, only if this Node is a directory.
- // This slice will be arbitrarily deep (children, grandchildren, great-
- // grandchildren, etc.) if a recursive Get or Watch request was made.
- Nodes Nodes `json:"nodes"`
-
- // CreatedIndex is the etcd index at-which this Node was created.
- CreatedIndex uint64 `json:"createdIndex"`
-
- // ModifiedIndex is the etcd index at-which this Node was last modified.
- ModifiedIndex uint64 `json:"modifiedIndex"`
-
- // Expiration is the server side expiration time of the key.
- Expiration *time.Time `json:"expiration,omitempty"`
-
- // TTL is the time to live of the key in second.
- // TTL is the time to live of the key in seconds.
-}
-
-func (n *Node) String() string {
- return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
-}
-
-// TTLDuration returns the Node's TTL as a time.Duration object
-func (n *Node) TTLDuration() time.Duration {
- return time.Duration(n.TTL) * time.Second
-}
-
-type Nodes []*Node
-
-// interfaces for sorting
-
-func (ns Nodes) Len() int { return len(ns) }
-func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
-func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
-
-type httpKeysAPI struct {
- client httpClient
- prefix string
-}
-
-func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
- act := &setAction{
- Prefix: k.prefix,
- Key: key,
- Value: val,
- }
-
- if opts != nil {
- act.PrevValue = opts.PrevValue
- act.PrevIndex = opts.PrevIndex
- act.PrevExist = opts.PrevExist
- act.TTL = opts.TTL
- act.Refresh = opts.Refresh
- act.Dir = opts.Dir
- act.NoValueOnSuccess = opts.NoValueOnSuccess
- }
-
- doCtx := ctx
- if act.PrevExist == PrevNoExist {
- doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
- }
- resp, body, err := k.client.Do(doCtx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
- return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
-}
-
-func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
- act := &createInOrderAction{
- Prefix: k.prefix,
- Dir: dir,
- Value: val,
- }
-
- if opts != nil {
- act.TTL = opts.TTL
- }
-
- resp, body, err := k.client.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
- return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
-}
-
-func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
- act := &deleteAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.PrevValue = opts.PrevValue
- act.PrevIndex = opts.PrevIndex
- act.Dir = opts.Dir
- act.Recursive = opts.Recursive
- }
-
- doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
- resp, body, err := k.client.Do(doCtx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
- act := &getAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.Recursive = opts.Recursive
- act.Sorted = opts.Sort
- act.Quorum = opts.Quorum
- }
-
- resp, body, err := k.client.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
- act := waitAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.Recursive = opts.Recursive
- if opts.AfterIndex > 0 {
- act.WaitIndex = opts.AfterIndex + 1
- }
- }
-
- return &httpWatcher{
- client: k.client,
- nextWait: act,
- }
-}
-
-type httpWatcher struct {
- client httpClient
- nextWait waitAction
-}
-
-func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
- for {
- httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
- if err != nil {
- return nil, err
- }
-
- resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
- if err != nil {
- if err == ErrEmptyBody {
- continue
- }
- return nil, err
- }
-
- hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
- return resp, nil
- }
-}
-
-// v2KeysURL forms a URL representing the location of a key.
-// The endpoint argument represents the base URL of an etcd
-// server. The prefix is the path needed to route from the
-// provided endpoint's path to the root of the keys API
-// (typically "/v2/keys").
-func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
- // We concatenate all parts together manually. We cannot use
- // path.Join because it does not preserve the trailing slash.
- // We call CanonicalURLPath to further cleanup the path.
- if prefix != "" && prefix[0] != '/' {
- prefix = "/" + prefix
- }
- if key != "" && key[0] != '/' {
- key = "/" + key
- }
- ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
- return &ep
-}
-
-type getAction struct {
- Prefix string
- Key string
- Recursive bool
- Sorted bool
- Quorum bool
-}
-
-func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, g.Prefix, g.Key)
-
- params := u.Query()
- params.Set("recursive", strconv.FormatBool(g.Recursive))
- params.Set("sorted", strconv.FormatBool(g.Sorted))
- params.Set("quorum", strconv.FormatBool(g.Quorum))
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type waitAction struct {
- Prefix string
- Key string
- WaitIndex uint64
- Recursive bool
-}
-
-func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, w.Prefix, w.Key)
-
- params := u.Query()
- params.Set("wait", "true")
- params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
- params.Set("recursive", strconv.FormatBool(w.Recursive))
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type setAction struct {
- Prefix string
- Key string
- Value string
- PrevValue string
- PrevIndex uint64
- PrevExist PrevExistType
- TTL time.Duration
- Refresh bool
- Dir bool
- NoValueOnSuccess bool
-}
-
-func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Key)
-
- params := u.Query()
- form := url.Values{}
-
- // we're either creating a directory or setting a key
- if a.Dir {
- params.Set("dir", strconv.FormatBool(a.Dir))
- } else {
- // These options are only valid for setting a key
- if a.PrevValue != "" {
- params.Set("prevValue", a.PrevValue)
- }
- form.Add("value", a.Value)
- }
-
- // Options which apply to both setting a key and creating a dir
- if a.PrevIndex != 0 {
- params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
- }
- if a.PrevExist != PrevIgnore {
- params.Set("prevExist", string(a.PrevExist))
- }
- if a.TTL > 0 {
- form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
- }
-
- if a.Refresh {
- form.Add("refresh", "true")
- }
- if a.NoValueOnSuccess {
- params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
- }
-
- u.RawQuery = params.Encode()
- body := strings.NewReader(form.Encode())
-
- req, _ := http.NewRequest("PUT", u.String(), body)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req
-}
-
-type deleteAction struct {
- Prefix string
- Key string
- PrevValue string
- PrevIndex uint64
- Dir bool
- Recursive bool
-}
-
-func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Key)
-
- params := u.Query()
- if a.PrevValue != "" {
- params.Set("prevValue", a.PrevValue)
- }
- if a.PrevIndex != 0 {
- params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
- }
- if a.Dir {
- params.Set("dir", "true")
- }
- if a.Recursive {
- params.Set("recursive", "true")
- }
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("DELETE", u.String(), nil)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req
-}
-
-type createInOrderAction struct {
- Prefix string
- Dir string
- Value string
- TTL time.Duration
-}
-
-func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Dir)
-
- form := url.Values{}
- form.Add("value", a.Value)
- if a.TTL > 0 {
- form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
- }
- body := strings.NewReader(form.Encode())
-
- req, _ := http.NewRequest("POST", u.String(), body)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- return req
-}
-
-func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
- switch code {
- case http.StatusOK, http.StatusCreated:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- res, err = unmarshalSuccessfulKeysResponse(header, body)
- default:
- err = unmarshalFailedKeysResponse(body)
- }
- return res, err
-}
-
-func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
- var res Response
- err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
- if err != nil {
- return nil, ErrInvalidJSON
- }
- if header.Get("X-Etcd-Index") != "" {
- res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
- if err != nil {
- return nil, err
- }
- }
- res.ClusterID = header.Get("X-Etcd-Cluster-ID")
- return &res, nil
-}
-
-func unmarshalFailedKeysResponse(body []byte) error {
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return ErrInvalidJSON
- }
- return etcdErr
-}
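For context, the file above provided etcd's v2 KeysAPI. A minimal sketch of how it was typically consumed is shown below; it assumes `client.New` and `client.Config` from the same package (not part of this hunk) and a local etcd listening on 127.0.0.1:2379.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
)

func main() {
	// client.New / client.Config are assumed from the same (removed) v2 package.
	c, err := client.New(client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		HeaderTimeoutPerRequest: time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	// Set then read back a key; Quorum requests a linearizable read.
	if _, err := kapi.Set(context.Background(), "/foo", "bar", nil); err != nil {
		log.Fatal(err)
	}
	resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Node.Value)

	// Watch for the next change after the index reported by the Get above.
	w := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Index})
	if ev, err := w.Next(context.Background()); err == nil {
		fmt.Println(ev.Action, ev.Node.Key, ev.Node.Value)
	}
}
```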
diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go
deleted file mode 100644
index aafa3d1..0000000
--- a/vendor/github.com/coreos/etcd/client/members.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "path"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-var (
- defaultV2MembersPrefix = "/v2/members"
- defaultLeaderSuffix = "/leader"
-)
-
-type Member struct {
- // ID is the unique identifier of this Member.
- ID string `json:"id"`
-
- // Name is a human-readable, non-unique identifier of this Member.
- Name string `json:"name"`
-
- // PeerURLs represents the HTTP(S) endpoints this Member uses to
- // participate in etcd's consensus protocol.
- PeerURLs []string `json:"peerURLs"`
-
- // ClientURLs represents the HTTP(S) endpoints on which this Member
- // serves its client-facing APIs.
- ClientURLs []string `json:"clientURLs"`
-}
-
-type memberCollection []Member
-
-func (c *memberCollection) UnmarshalJSON(data []byte) error {
- d := struct {
- Members []Member
- }{}
-
- if err := json.Unmarshal(data, &d); err != nil {
- return err
- }
-
- if d.Members == nil {
- *c = make([]Member, 0)
- return nil
- }
-
- *c = d.Members
- return nil
-}
-
-type memberCreateOrUpdateRequest struct {
- PeerURLs types.URLs
-}
-
-func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
- s := struct {
- PeerURLs []string `json:"peerURLs"`
- }{
- PeerURLs: make([]string, len(m.PeerURLs)),
- }
-
- for i, u := range m.PeerURLs {
- s.PeerURLs[i] = u.String()
- }
-
- return json.Marshal(&s)
-}
-
-// NewMembersAPI constructs a new MembersAPI that uses HTTP to
-// interact with etcd's membership API.
-func NewMembersAPI(c Client) MembersAPI {
- return &httpMembersAPI{
- client: c,
- }
-}
-
-type MembersAPI interface {
- // List enumerates the current cluster membership.
- List(ctx context.Context) ([]Member, error)
-
- // Add instructs etcd to accept a new Member into the cluster.
- Add(ctx context.Context, peerURL string) (*Member, error)
-
- // Remove demotes an existing Member out of the cluster.
- Remove(ctx context.Context, mID string) error
-
- // Update instructs etcd to update an existing Member in the cluster.
- Update(ctx context.Context, mID string, peerURLs []string) error
-
- // Leader gets current leader of the cluster
- Leader(ctx context.Context) (*Member, error)
-}
-
-type httpMembersAPI struct {
- client httpClient
-}
-
-func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
- req := &membersAPIActionList{}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
-
- var mCollection memberCollection
- if err := json.Unmarshal(body, &mCollection); err != nil {
- return nil, err
- }
-
- return []Member(mCollection), nil
-}
-
-func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
- urls, err := types.NewURLs([]string{peerURL})
- if err != nil {
- return nil, err
- }
-
- req := &membersAPIActionAdd{peerURLs: urls}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
- return nil, err
- }
-
- if resp.StatusCode != http.StatusCreated {
- var merr membersError
- if err := json.Unmarshal(body, &merr); err != nil {
- return nil, err
- }
- return nil, merr
- }
-
- var memb Member
- if err := json.Unmarshal(body, &memb); err != nil {
- return nil, err
- }
-
- return &memb, nil
-}
-
-func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
- urls, err := types.NewURLs(peerURLs)
- if err != nil {
- return err
- }
-
- req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
- return err
- }
-
- if resp.StatusCode != http.StatusNoContent {
- var merr membersError
- if err := json.Unmarshal(body, &merr); err != nil {
- return err
- }
- return merr
- }
-
- return nil
-}
-
-func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
- req := &membersAPIActionRemove{memberID: memberID}
- resp, _, err := m.client.Do(ctx, req)
- if err != nil {
- return err
- }
-
- return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
-}
-
-func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
- req := &membersAPIActionLeader{}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
-
- var leader Member
- if err := json.Unmarshal(body, &leader); err != nil {
- return nil, err
- }
-
- return &leader, nil
-}
-
-type membersAPIActionList struct{}
-
-func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type membersAPIActionRemove struct {
- memberID string
-}
-
-func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- u.Path = path.Join(u.Path, d.memberID)
- req, _ := http.NewRequest("DELETE", u.String(), nil)
- return req
-}
-
-type membersAPIActionAdd struct {
- peerURLs types.URLs
-}
-
-func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
- b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-type membersAPIActionUpdate struct {
- memberID string
- peerURLs types.URLs
-}
-
-func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
- u.Path = path.Join(u.Path, a.memberID)
- b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func assertStatusCode(got int, want ...int) (err error) {
- for _, w := range want {
- if w == got {
- return nil
- }
- }
- return fmt.Errorf("unexpected status code %d", got)
-}
-
-type membersAPIActionLeader struct{}
-
-func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- u.Path = path.Join(u.Path, defaultLeaderSuffix)
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-// v2MembersURL adds the necessary path to the provided endpoint
-// to route requests to the default v2 members API.
-func v2MembersURL(ep url.URL) *url.URL {
- ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
- return &ep
-}
-
-type membersError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e membersError) Error() string {
- return e.Message
-}
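The members.go file removed here exposed the v2 membership API. A short, illustrative sketch of its use, assuming an existing `client.Client`:

```go
package etcdv2example

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

// listAndAddMember lists the current membership and then registers a new peer
// URL, using the MembersAPI removed above. The peer URL is a placeholder.
func listAndAddMember(c client.Client) {
	mapi := client.NewMembersAPI(c)
	members, err := mapi.List(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members {
		fmt.Println(m.ID, m.Name, m.PeerURLs)
	}
	if _, err := mapi.Add(context.Background(), "http://10.0.0.10:2380"); err != nil {
		log.Fatal(err)
	}
}
```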
diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go
deleted file mode 100644
index 15a8bab..0000000
--- a/vendor/github.com/coreos/etcd/client/util.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "regexp"
-)
-
-var (
- roleNotFoundRegExp *regexp.Regexp
- userNotFoundRegExp *regexp.Regexp
-)
-
-func init() {
- roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
- userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
-}
-
-// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
-func IsKeyNotFound(err error) bool {
- if cErr, ok := err.(Error); ok {
- return cErr.Code == ErrorCodeKeyNotFound
- }
- return false
-}
-
-// IsRoleNotFound returns true if the error means the role was not found in the v2 API.
-func IsRoleNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return roleNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
-
-// IsUserNotFound returns true if the error means the user was not found in the v2 API.
-func IsUserNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return userNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
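The helpers in util.go were typically used to branch on v2 error codes; a brief sketch, assuming a `client.KeysAPI` obtained as in the earlier example:

```go
package etcdv2example

import (
	"context"

	"github.com/coreos/etcd/client"
)

// getOrDefault returns the value stored at key, or def when the key does not
// exist, using IsKeyNotFound from the file removed above.
func getOrDefault(kapi client.KeysAPI, key, def string) (string, error) {
	resp, err := kapi.Get(context.Background(), key, nil)
	if client.IsKeyNotFound(err) {
		return def, nil
	}
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}
```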
diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md
deleted file mode 100644
index 376bfba..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# etcd/clientv3
-
-[GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3)
-
-`etcd/clientv3` is the official Go etcd client for v3.
-
-## Install
-
-```bash
-go get github.com/coreos/etcd/clientv3
-```
-
-## Get started
-
-Create client using `clientv3.New`:
-
-```go
-cli, err := clientv3.New(clientv3.Config{
- Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
- DialTimeout: 5 * time.Second,
-})
-if err != nil {
- // handle error!
-}
-defer cli.Close()
-```
-
-etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls, and `clientv3` uses
-[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it;
-if the client is not closed, its connection leaks goroutines. To specify a client request timeout,
-pass `context.WithTimeout` to APIs:
-
-```go
-ctx, cancel := context.WithTimeout(context.Background(), timeout)
-resp, err := cli.Put(ctx, "sample_key", "sample_value")
-cancel()
-if err != nil {
- // handle error!
-}
-// use the response
-```
-
-etcd uses the `cmd/vendor` directory to store external dependencies, which are
-compiled into etcd release binaries. `client` can be imported without
-vendoring. For full compatibility, it is recommended to build against
-etcd's vendored packages, using tools like godep, as described in
-[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
-For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor).
-
-## Error Handling
-
-etcd client returns 2 types of errors:
-
-1. context error: canceled or deadline exceeded.
-2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).
-
-Here is the example code to handle client errors:
-
-```go
-resp, err := cli.Put(ctx, "", "")
-if err != nil {
- switch err {
- case context.Canceled:
- log.Fatalf("ctx is canceled by another routine: %v", err)
- case context.DeadlineExceeded:
- log.Fatalf("ctx deadline is exceeded: %v", err)
- case rpctypes.ErrEmptyKey:
- log.Fatalf("client-side error: %v", err)
- default:
- log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
- }
-}
-```
-
-## Metrics
-
-The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
-
-## Namespacing
-
-The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
-
-## Examples
-
-More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
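The Namespacing section of the removed README has no inline example; a short sketch of the wrappers it refers to, assuming the `clientv3/namespace` package it links to:

```go
package etcdv3example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
)

// scopeClient rewrites every request made through cli so it lives under prefix;
// after this, Put(ctx, "abc", "123") is stored under prefix+"abc".
func scopeClient(cli *clientv3.Client, prefix string) error {
	cli.KV = namespace.NewKV(cli.KV, prefix)
	cli.Watcher = namespace.NewWatcher(cli.Watcher, prefix)
	cli.Lease = namespace.NewLease(cli.Lease, prefix)
	_, err := cli.Put(context.Background(), "abc", "123")
	return err
}
```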
diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go
deleted file mode 100644
index 7545bb6..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/auth.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/coreos/etcd/auth/authpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- AuthEnableResponse pb.AuthEnableResponse
- AuthDisableResponse pb.AuthDisableResponse
- AuthenticateResponse pb.AuthenticateResponse
- AuthUserAddResponse pb.AuthUserAddResponse
- AuthUserDeleteResponse pb.AuthUserDeleteResponse
- AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
- AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
- AuthUserGetResponse pb.AuthUserGetResponse
- AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
- AuthRoleAddResponse pb.AuthRoleAddResponse
- AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
- AuthRoleGetResponse pb.AuthRoleGetResponse
- AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
- AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
- AuthUserListResponse pb.AuthUserListResponse
- AuthRoleListResponse pb.AuthRoleListResponse
-
- PermissionType authpb.Permission_Type
- Permission authpb.Permission
-)
-
-const (
- PermRead = authpb.READ
- PermWrite = authpb.WRITE
- PermReadWrite = authpb.READWRITE
-)
-
-type Auth interface {
- // AuthEnable enables auth of an etcd cluster.
- AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
-
- // AuthDisable disables auth of an etcd cluster.
- AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
-
- // UserAdd adds a new user to an etcd cluster.
- UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
-
- // UserDelete deletes a user from an etcd cluster.
- UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
-
- // UserChangePassword changes a password of a user.
- UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
-
- // UserGrantRole grants a role to a user.
- UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
-
- // UserGet gets detailed information about a user.
- UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
-
- // UserList gets a list of all users.
- UserList(ctx context.Context) (*AuthUserListResponse, error)
-
- // UserRevokeRole revokes a role of a user.
- UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
-
- // RoleAdd adds a new role to an etcd cluster.
- RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
-
- // RoleGrantPermission grants a permission to a role.
- RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
-
- // RoleGet gets detailed information about a role.
- RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
-
- // RoleList gets a list of all roles.
- RoleList(ctx context.Context) (*AuthRoleListResponse, error)
-
- // RoleRevokePermission revokes a permission from a role.
- RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
-
- // RoleDelete deletes a role.
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
-}
-
-type auth struct {
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func NewAuth(c *Client) Auth {
- api := &auth{remote: RetryAuthClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
- resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
- return (*AuthEnableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
- resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
- return (*AuthDisableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
- resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
- resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
- return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
- resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
- resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
- return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
- resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
- return (*AuthUserGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
- resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
- return (*AuthUserListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
- resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
- return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
- resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
- return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
- perm := &authpb.Permission{
- Key: []byte(key),
- RangeEnd: []byte(rangeEnd),
- PermType: authpb.Permission_Type(permType),
- }
- resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
- resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
- resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
- return (*AuthRoleListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
- resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
- return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
- resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func StrToPermissionType(s string) (PermissionType, error) {
- val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
- if ok {
- return PermissionType(val), nil
- }
- return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
-}
-
-type authenticator struct {
- conn *grpc.ClientConn // conn in-use
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
- resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthenticateResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authenticator) close() {
- auth.conn.Close()
-}
-
-func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return nil, err
- }
-
- api := &authenticator{
- conn: conn,
- remote: pb.NewAuthClient(conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api, nil
-}
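The Auth interface removed above is reachable directly from `clientv3.Client`, which embeds it. A hedged sketch of enabling authentication (etcd expects a root user and root role before AuthEnable):

```go
package etcdv3example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// enableRootAuth creates a root user, grants it the root role, and then turns
// authentication on, using the Auth methods removed above.
func enableRootAuth(cli *clientv3.Client, password string) error {
	ctx := context.Background()
	if _, err := cli.UserAdd(ctx, "root", password); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}
	_, err := cli.AuthEnable(ctx)
	return err
}
```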
diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go
deleted file mode 100644
index 7132807..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/client.go
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "net/url"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-var (
- ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
- ErrOldCluster = errors.New("etcdclient: old cluster version")
-)
-
-// Client provides and manages an etcd v3 client session.
-type Client struct {
- Cluster
- KV
- Lease
- Watcher
- Auth
- Maintenance
-
- conn *grpc.ClientConn
- dialerrc chan error
-
- cfg Config
- creds *credentials.TransportCredentials
- balancer *healthBalancer
- mu *sync.Mutex
-
- ctx context.Context
- cancel context.CancelFunc
-
- // Username is a user name for authentication.
- Username string
- // Password is a password for authentication.
- Password string
- // tokenCred is an instance of WithPerRPCCredentials()'s argument
- tokenCred *authTokenCredential
-
- callOpts []grpc.CallOption
-}
-
-// New creates a new etcdv3 client from a given configuration.
-func New(cfg Config) (*Client, error) {
- if len(cfg.Endpoints) == 0 {
- return nil, ErrNoAvailableEndpoints
- }
-
- return newClient(&cfg)
-}
-
-// NewCtxClient creates a client with a context but no underlying grpc
-// connection. This is useful for embedded cases that override the
-// service interface implementations and do not need connection management.
-func NewCtxClient(ctx context.Context) *Client {
- cctx, cancel := context.WithCancel(ctx)
- return &Client{ctx: cctx, cancel: cancel}
-}
-
-// NewFromURL creates a new etcdv3 client from a URL.
-func NewFromURL(url string) (*Client, error) {
- return New(Config{Endpoints: []string{url}})
-}
-
-// Close shuts down the client's etcd connections.
-func (c *Client) Close() error {
- c.cancel()
- c.Watcher.Close()
- c.Lease.Close()
- if c.conn != nil {
- return toErr(c.ctx, c.conn.Close())
- }
- return c.ctx.Err()
-}
-
-// Ctx is a context for "out of band" messages (e.g., for sending
-// "clean up" message when another context is canceled). It is
-// canceled on client Close().
-func (c *Client) Ctx() context.Context { return c.ctx }
-
-// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() (eps []string) {
- // copy the slice; protect original endpoints from being changed
- eps = make([]string, len(c.cfg.Endpoints))
- copy(eps, c.cfg.Endpoints)
- return
-}
-
-// SetEndpoints updates client's endpoints.
-func (c *Client) SetEndpoints(eps ...string) {
- c.mu.Lock()
- c.cfg.Endpoints = eps
- c.mu.Unlock()
- c.balancer.updateAddrs(eps...)
-
- // updating notifyCh can trigger new connections;
- // we need to update addrs if all connections are down
- // or addrs does not include pinAddr.
- c.balancer.mu.RLock()
- update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
- c.balancer.mu.RUnlock()
- if update {
- select {
- case c.balancer.updateAddrsC <- notifyNext:
- case <-c.balancer.stopc:
- }
- }
-}
-
-// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
-func (c *Client) Sync(ctx context.Context) error {
- mresp, err := c.MemberList(ctx)
- if err != nil {
- return err
- }
- var eps []string
- for _, m := range mresp.Members {
- eps = append(eps, m.ClientURLs...)
- }
- c.SetEndpoints(eps...)
- return nil
-}
-
-func (c *Client) autoSync() {
- if c.cfg.AutoSyncInterval == time.Duration(0) {
- return
- }
-
- for {
- select {
- case <-c.ctx.Done():
- return
- case <-time.After(c.cfg.AutoSyncInterval):
- ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
- err := c.Sync(ctx)
- cancel()
- if err != nil && err != c.ctx.Err() {
- logger.Println("Auto sync endpoints failed:", err)
- }
- }
- }
-}
-
-type authTokenCredential struct {
- token string
- tokenMu *sync.RWMutex
-}
-
-func (cred authTokenCredential) RequireTransportSecurity() bool {
- return false
-}
-
-func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
- cred.tokenMu.RLock()
- defer cred.tokenMu.RUnlock()
- return map[string]string{
- "token": cred.token,
- }, nil
-}
-
-func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
- proto = "tcp"
- host = endpoint
- url, uerr := url.Parse(endpoint)
- if uerr != nil || !strings.Contains(endpoint, "://") {
- return proto, host, scheme
- }
- scheme = url.Scheme
-
- // strip scheme:// prefix since grpc dials by host
- host = url.Host
- switch url.Scheme {
- case "http", "https":
- case "unix", "unixs":
- proto = "unix"
- host = url.Host + url.Path
- default:
- proto, host = "", ""
- }
- return proto, host, scheme
-}
-
-func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
- creds = c.creds
- switch scheme {
- case "unix":
- case "http":
- creds = nil
- case "https", "unixs":
- if creds != nil {
- break
- }
- tlsconfig := &tls.Config{}
- emptyCreds := credentials.NewTLS(tlsconfig)
- creds = &emptyCreds
- default:
- creds = nil
- }
- return creds
-}
-
-// dialSetupOpts gives the dial opts prior to any authentication
-func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
- if c.cfg.DialTimeout > 0 {
- opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
- }
- if c.cfg.DialKeepAliveTime > 0 {
- params := keepalive.ClientParameters{
- Time: c.cfg.DialKeepAliveTime,
- Timeout: c.cfg.DialKeepAliveTimeout,
- }
- opts = append(opts, grpc.WithKeepaliveParams(params))
- }
- opts = append(opts, dopts...)
-
- f := func(host string, t time.Duration) (net.Conn, error) {
- proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
- if host == "" && endpoint != "" {
- // dialing an endpoint not in the balancer; use
- // endpoint passed into dial
- proto, host, _ = parseEndpoint(endpoint)
- }
- if proto == "" {
- return nil, fmt.Errorf("unknown scheme for %q", host)
- }
- select {
- case <-c.ctx.Done():
- return nil, c.ctx.Err()
- default:
- }
- dialer := &net.Dialer{Timeout: t}
- conn, err := dialer.DialContext(c.ctx, proto, host)
- if err != nil {
- select {
- case c.dialerrc <- err:
- default:
- }
- }
- return conn, err
- }
- opts = append(opts, grpc.WithDialer(f))
-
- creds := c.creds
- if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
- creds = c.processCreds(scheme)
- }
- if creds != nil {
- opts = append(opts, grpc.WithTransportCredentials(*creds))
- } else {
- opts = append(opts, grpc.WithInsecure())
- }
-
- return opts
-}
-
-// Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
- return c.dial(endpoint)
-}
-
-func (c *Client) getToken(ctx context.Context) error {
-	var err error // return the last error in case of failure
- var auth *authenticator
-
- for i := 0; i < len(c.cfg.Endpoints); i++ {
- endpoint := c.cfg.Endpoints[i]
- host := getHost(endpoint)
- // use dial options without dopts to avoid reusing the client balancer
- auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
- if err != nil {
- continue
- }
- defer auth.close()
-
- var resp *AuthenticateResponse
- resp, err = auth.authenticate(ctx, c.Username, c.Password)
- if err != nil {
- continue
- }
-
- c.tokenCred.tokenMu.Lock()
- c.tokenCred.token = resp.Token
- c.tokenCred.tokenMu.Unlock()
-
- return nil
- }
-
- return err
-}
-
-func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- opts := c.dialSetupOpts(endpoint, dopts...)
- host := getHost(endpoint)
- if c.Username != "" && c.Password != "" {
- c.tokenCred = &authTokenCredential{
- tokenMu: &sync.RWMutex{},
- }
-
- ctx := c.ctx
- if c.cfg.DialTimeout > 0 {
- cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
- defer cancel()
- ctx = cctx
- }
-
- err := c.getToken(ctx)
- if err != nil {
- if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
- if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
- err = context.DeadlineExceeded
- }
- return nil, err
- }
- } else {
- opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
- }
- }
-
- opts = append(opts, c.cfg.DialOptions...)
-
- conn, err := grpc.DialContext(c.ctx, host, opts...)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
- md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
- return metadata.NewOutgoingContext(ctx, md)
-}
-
-func newClient(cfg *Config) (*Client, error) {
- if cfg == nil {
- cfg = &Config{}
- }
- var creds *credentials.TransportCredentials
- if cfg.TLS != nil {
- c := credentials.NewTLS(cfg.TLS)
- creds = &c
- }
-
- // use a temporary skeleton client to bootstrap first connection
- baseCtx := context.TODO()
- if cfg.Context != nil {
- baseCtx = cfg.Context
- }
-
- ctx, cancel := context.WithCancel(baseCtx)
- client := &Client{
- conn: nil,
- dialerrc: make(chan error, 1),
- cfg: *cfg,
- creds: creds,
- ctx: ctx,
- cancel: cancel,
- mu: new(sync.Mutex),
- callOpts: defaultCallOpts,
- }
- if cfg.Username != "" && cfg.Password != "" {
- client.Username = cfg.Username
- client.Password = cfg.Password
- }
- if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
- if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
- return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
- }
- callOpts := []grpc.CallOption{
- defaultFailFast,
- defaultMaxCallSendMsgSize,
- defaultMaxCallRecvMsgSize,
- }
- if cfg.MaxCallSendMsgSize > 0 {
- callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
- }
- if cfg.MaxCallRecvMsgSize > 0 {
- callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
- }
- client.callOpts = callOpts
- }
-
- client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
- return grpcHealthCheck(client, ep)
- })
-
-	// use Endpoints[0] so that, for https:// endpoints without any TLS config given,
-	// grpc will assume the certificate server name is the endpoint host.
- conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
- if err != nil {
- client.cancel()
- client.balancer.Close()
- return nil, err
- }
- client.conn = conn
-
- // wait for a connection
- if cfg.DialTimeout > 0 {
- hasConn := false
- waitc := time.After(cfg.DialTimeout)
- select {
- case <-client.balancer.ready():
- hasConn = true
- case <-ctx.Done():
- case <-waitc:
- }
- if !hasConn {
- err := context.DeadlineExceeded
- select {
- case err = <-client.dialerrc:
- default:
- }
- client.cancel()
- client.balancer.Close()
- conn.Close()
- return nil, err
- }
- }
-
- client.Cluster = NewCluster(client)
- client.KV = NewKV(client)
- client.Lease = NewLease(client)
- client.Watcher = NewWatcher(client)
- client.Auth = NewAuth(client)
- client.Maintenance = NewMaintenance(client)
-
- if cfg.RejectOldCluster {
- if err := client.checkVersion(); err != nil {
- client.Close()
- return nil, err
- }
- }
-
- go client.autoSync()
- return client, nil
-}
-
-func (c *Client) checkVersion() (err error) {
- var wg sync.WaitGroup
- errc := make(chan error, len(c.cfg.Endpoints))
- ctx, cancel := context.WithCancel(c.ctx)
- if c.cfg.DialTimeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
- }
- wg.Add(len(c.cfg.Endpoints))
- for _, ep := range c.cfg.Endpoints {
- // if cluster is current, any endpoint gives a recent version
- go func(e string) {
- defer wg.Done()
- resp, rerr := c.Status(ctx, e)
- if rerr != nil {
- errc <- rerr
- return
- }
- vs := strings.Split(resp.Version, ".")
- maj, min := 0, 0
- if len(vs) >= 2 {
- maj, _ = strconv.Atoi(vs[0])
- min, rerr = strconv.Atoi(vs[1])
- }
- if maj < 3 || (maj == 3 && min < 2) {
- rerr = ErrOldCluster
- }
- errc <- rerr
- }(ep)
- }
- // wait for success
- for i := 0; i < len(c.cfg.Endpoints); i++ {
- if err = <-errc; err == nil {
- break
- }
- }
- cancel()
- wg.Wait()
- return err
-}
-
-// ActiveConnection returns the current in-use connection
-func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
-
-// isHaltErr returns true if the given error and context indicate no forward
-// progress can be made, even after reconnecting.
-func isHaltErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return true
- }
- if err == nil {
- return false
- }
- ev, _ := status.FromError(err)
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- // Treat Internal codes as if something failed, leaving the
- // system in an inconsistent state, but retrying could make progress.
- // (e.g., failed in middle of send, corrupted frame)
- // TODO: are permanent Internal errors possible from grpc?
- return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
-}
-
-// isUnavailableErr returns true if the given error is an unavailable error
-func isUnavailableErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return false
- }
- if err == nil {
- return false
- }
- ev, _ := status.FromError(err)
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- return ev.Code() == codes.Unavailable
-}
-
-func toErr(ctx context.Context, err error) error {
- if err == nil {
- return nil
- }
- err = rpctypes.Error(err)
- if _, ok := err.(rpctypes.EtcdError); ok {
- return err
- }
- ev, _ := status.FromError(err)
- code := ev.Code()
- switch code {
- case codes.DeadlineExceeded:
- fallthrough
- case codes.Canceled:
- if ctx.Err() != nil {
- err = ctx.Err()
- }
- case codes.Unavailable:
- case codes.FailedPrecondition:
- err = grpc.ErrClientConnClosing
- }
- return err
-}
-
-func canceledByCaller(stopCtx context.Context, err error) bool {
- if stopCtx.Err() == nil || err == nil {
- return false
- }
-
- return err == context.Canceled || err == context.DeadlineExceeded
-}
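The dial path above threads together endpoint parsing, transport credentials, keepalives, and the optional username/password token exchange before handing the connection to gRPC. As a minimal sketch of how that surface is typically driven from application code (the endpoint, credentials, and key names below are placeholders, not values mandated by the package):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Illustrative endpoint and credentials only.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
		Username:    "root",   // enables the getToken/per-RPC credential path in dial()
		Password:    "secret",
	})
	if err != nil {
		// With DialTimeout set, an unreachable cluster surfaces as
		// context.DeadlineExceeded (see the wait-for-connection logic above).
		fmt.Println("connect failed:", err)
		return
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	_, err = cli.Put(ctx, "sample_key", "sample_value")
	cancel()
	if err != nil {
		fmt.Println("put failed:", err)
	}
}
```

Because newClient waits on the balancer's ready channel when a DialTimeout is configured, the constructor fails fast instead of blocking indefinitely on a dead cluster.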
diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go
deleted file mode 100644
index 785672b..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/cluster.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
-
- "google.golang.org/grpc"
-)
-
-type (
- Member pb.Member
- MemberListResponse pb.MemberListResponse
- MemberAddResponse pb.MemberAddResponse
- MemberRemoveResponse pb.MemberRemoveResponse
- MemberUpdateResponse pb.MemberUpdateResponse
-)
-
-type Cluster interface {
- // MemberList lists the current cluster membership.
- MemberList(ctx context.Context) (*MemberListResponse, error)
-
- // MemberAdd adds a new member into the cluster.
- MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
-
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
-
- // MemberUpdate updates the peer addresses of the member.
- MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
-}
-
-type cluster struct {
- remote pb.ClusterClient
- callOpts []grpc.CallOption
-}
-
-func NewCluster(c *Client) Cluster {
- api := &cluster{remote: RetryClusterClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
- api := &cluster{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
- resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberAddResponse)(resp), nil
-}
-
-func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
- r := &pb.MemberRemoveRequest{ID: id}
- resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberRemoveResponse)(resp), nil
-}
-
-func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- // it is safe to retry on update.
- r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
- resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
- if err == nil {
- return (*MemberUpdateResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
- // it is safe to retry on list.
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
- if err == nil {
- return (*MemberListResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
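The Cluster interface above is the same API Client.Sync uses to discover member client URLs. A small sketch of listing membership from an existing client (the helper name is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// printMembers lists the current cluster membership via the Cluster API,
// the same call Client.Sync uses to refresh its endpoints.
func printMembers(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	resp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %v %v\n", m.ID, m.PeerURLs, m.ClientURLs)
	}
	return nil
}
```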
diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go
deleted file mode 100644
index 41e80c1..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-// CompactOp represents a compact operation.
-type CompactOp struct {
- revision int64
- physical bool
-}
-
-// CompactOption configures compact operation.
-type CompactOption func(*CompactOp)
-
-func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// OpCompact wraps slice CompactOption to create a CompactOp.
-func OpCompact(rev int64, opts ...CompactOption) CompactOp {
- ret := CompactOp{revision: rev}
- ret.applyCompactOpts(opts)
- return ret
-}
-
-func (op CompactOp) toRequest() *pb.CompactionRequest {
- return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
-}
-
-// WithCompactPhysical makes Compact wait until all compacted entries are
-// removed from the etcd server's storage.
-func WithCompactPhysical() CompactOption {
- return func(op *CompactOp) { op.physical = true }
-}
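CompactOp only builds the CompactionRequest; the request itself is issued through the KV client's Compact method. A hedged sketch, assuming that method and a revision obtained elsewhere:

```go
package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// compactHistory asks the server to drop key history older than rev.
// Compact here is the KV client's method (assumed), which accepts the
// CompactOption values defined above.
func compactHistory(cli *clientv3.Client, rev int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}
```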
diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go
deleted file mode 100644
index b5f0a25..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/compare.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type CompareTarget int
-type CompareResult int
-
-const (
- CompareVersion CompareTarget = iota
- CompareCreated
- CompareModified
- CompareValue
-)
-
-type Cmp pb.Compare
-
-func Compare(cmp Cmp, result string, v interface{}) Cmp {
- var r pb.Compare_CompareResult
-
- switch result {
- case "=":
- r = pb.Compare_EQUAL
- case "!=":
- r = pb.Compare_NOT_EQUAL
- case ">":
- r = pb.Compare_GREATER
- case "<":
- r = pb.Compare_LESS
- default:
- panic("Unknown result op")
- }
-
- cmp.Result = r
- switch cmp.Target {
- case pb.Compare_VALUE:
- val, ok := v.(string)
- if !ok {
- panic("bad compare value")
- }
- cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
- case pb.Compare_VERSION:
- cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
- case pb.Compare_CREATE:
- cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
- case pb.Compare_MOD:
- cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
- case pb.Compare_LEASE:
- cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
- default:
- panic("Unknown compare type")
- }
- return cmp
-}
-
-func Value(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
-}
-
-func Version(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
-}
-
-func CreateRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
-}
-
-func ModRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
-}
-
-// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
-// LeaseID is 0, otherwise known as `NoLease`.
-func LeaseValue(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
-}
-
-// KeyBytes returns the byte slice holding the comparison key.
-func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
-
-// WithKeyBytes sets the byte slice for the comparison key.
-func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
-
-// ValueBytes returns the byte slice holding the comparison value, if any.
-func (cmp *Cmp) ValueBytes() []byte {
- if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
- return tu.Value
- }
- return nil
-}
-
-// WithValueBytes sets the byte slice for the comparison's value.
-func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
-
-// WithRange sets the comparison to scan the range [key, end).
-func (cmp Cmp) WithRange(end string) Cmp {
- cmp.RangeEnd = []byte(end)
- return cmp
-}
-
-// WithPrefix sets the comparison to scan all keys prefixed by the key.
-func (cmp Cmp) WithPrefix() Cmp {
- cmp.RangeEnd = getPrefix(cmp.Key)
- return cmp
-}
-
-// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
-func mustInt64(val interface{}) int64 {
- if v, ok := val.(int64); ok {
- return v
- }
- if v, ok := val.(int); ok {
- return int64(v)
- }
- panic("bad value")
-}
-
-// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
-// int64 otherwise.
-func mustInt64orLeaseID(val interface{}) int64 {
- if v, ok := val.(LeaseID); ok {
- return int64(v)
- }
- return mustInt64(val)
-}
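These Cmp builders are the guard clauses for transactions, as used by the concurrency helpers below. A minimal create-if-absent sketch (key, value, and helper name are illustrative):

```go
package main

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// putIfAbsent writes key=val only when the key has never been created,
// using the CreateRevision comparison defined above.
func putIfAbsent(cli *clientv3.Client, key, val string) (bool, error) {
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}
```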
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
deleted file mode 100644
index dcdbf51..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package concurrency implements concurrency operations on top of
-// etcd such as distributed locks, barriers, and elections.
-package concurrency
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
deleted file mode 100644
index e18a0ed..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
- "fmt"
-
- v3 "github.com/coreos/etcd/clientv3"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-var (
- ErrElectionNotLeader = errors.New("election: not leader")
- ErrElectionNoLeader = errors.New("election: no leader")
-)
-
-type Election struct {
- session *Session
-
- keyPrefix string
-
- leaderKey string
- leaderRev int64
- leaderSession *Session
- hdr *pb.ResponseHeader
-}
-
-// NewElection returns a new election on a given key prefix.
-func NewElection(s *Session, pfx string) *Election {
- return &Election{session: s, keyPrefix: pfx + "/"}
-}
-
-// ResumeElection initializes an election with a known leader.
-func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
- return &Election{
- session: s,
- leaderKey: leaderKey,
- leaderRev: leaderRev,
- leaderSession: s,
- }
-}
-
-// Campaign puts a value as eligible for the election. It blocks until
-// it is elected, an error occurs, or the context is cancelled.
-func (e *Election) Campaign(ctx context.Context, val string) error {
- s := e.session
- client := e.session.Client()
-
- k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
- txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
- txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
- txn = txn.Else(v3.OpGet(k))
- resp, err := txn.Commit()
- if err != nil {
- return err
- }
- e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
- if !resp.Succeeded {
- kv := resp.Responses[0].GetResponseRange().Kvs[0]
- e.leaderRev = kv.CreateRevision
- if string(kv.Value) != val {
- if err = e.Proclaim(ctx, val); err != nil {
- e.Resign(ctx)
- return err
- }
- }
- }
-
- _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
- if err != nil {
- // clean up in case of context cancel
- select {
- case <-ctx.Done():
- e.Resign(client.Ctx())
- default:
- e.leaderSession = nil
- }
- return err
- }
- e.hdr = resp.Header
-
- return nil
-}
-
-// Proclaim lets the leader announce a new value without another election.
-func (e *Election) Proclaim(ctx context.Context, val string) error {
- if e.leaderSession == nil {
- return ErrElectionNotLeader
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- txn := client.Txn(ctx).If(cmp)
- txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
- tresp, terr := txn.Commit()
- if terr != nil {
- return terr
- }
- if !tresp.Succeeded {
- e.leaderKey = ""
- return ErrElectionNotLeader
- }
-
- e.hdr = tresp.Header
- return nil
-}
-
-// Resign lets a leader start a new election.
-func (e *Election) Resign(ctx context.Context) (err error) {
- if e.leaderSession == nil {
- return nil
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
- if err == nil {
- e.hdr = resp.Header
- }
- e.leaderKey = ""
- e.leaderSession = nil
- return err
-}
-
-// Leader returns the leader value for the current election.
-func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
- client := e.session.Client()
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return nil, err
- } else if len(resp.Kvs) == 0 {
- // no leader currently elected
- return nil, ErrElectionNoLeader
- }
- return resp, nil
-}
-
-// Observe returns a channel that reliably observes ordered leader proposals
-// as GetResponse values on every current elected leader key. It will not
-// necessarily fetch all historical leader updates, but will always post the
-// most recent leader value.
-//
-// The channel closes when the context is canceled or the underlying watcher
-// is otherwise disrupted.
-func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
- retc := make(chan v3.GetResponse)
- go e.observe(ctx, retc)
- return retc
-}
-
-func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
- client := e.session.Client()
-
- defer close(ch)
- for {
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return
- }
-
- var kv *mvccpb.KeyValue
- var hdr *pb.ResponseHeader
-
- if len(resp.Kvs) == 0 {
- cctx, cancel := context.WithCancel(ctx)
- // wait for first key put on prefix
- opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
- wch := client.Watch(cctx, e.keyPrefix, opts...)
- for kv == nil {
- wr, ok := <-wch
- if !ok || wr.Err() != nil {
- cancel()
- return
- }
- // only accept puts; a delete will make observe() spin
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.PUT {
- hdr, kv = &wr.Header, ev.Kv
- // may have multiple revs; hdr.rev = the last rev
- // set to kv's rev in case batch has multiple Puts
- hdr.Revision = kv.ModRevision
- break
- }
- }
- }
- cancel()
- } else {
- hdr, kv = resp.Header, resp.Kvs[0]
- }
-
- select {
- case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
- case <-ctx.Done():
- return
- }
-
- cctx, cancel := context.WithCancel(ctx)
- wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
- keyDeleted := false
- for !keyDeleted {
- wr, ok := <-wch
- if !ok {
- cancel()
- return
- }
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- keyDeleted = true
- break
- }
- resp.Header = &wr.Header
- resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
- select {
- case ch <- *resp:
- case <-cctx.Done():
- cancel()
- return
- }
- }
- }
- cancel()
- }
-}
-
-// Key returns the leader key if elected, empty string otherwise.
-func (e *Election) Key() string { return e.leaderKey }
-
-// Rev returns the leader key's creation revision, if elected.
-func (e *Election) Rev() int64 { return e.leaderRev }
-
-// Header is the response header from the last successful election proposal.
-func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
deleted file mode 100644
index 4b6e399..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "fmt"
-
- v3 "github.com/coreos/etcd/clientv3"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- var wr v3.WatchResponse
- wch := client.Watch(cctx, key, v3.WithRev(rev))
- for wr = range wch {
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- return nil
- }
- }
- }
- if err := wr.Err(); err != nil {
- return err
- }
- if err := ctx.Err(); err != nil {
- return err
- }
- return fmt.Errorf("lost watcher waiting for delete")
-}
-
-// waitDeletes efficiently waits until all keys matching the prefix and no greater
-// than the create revision are deleted.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
- getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
- for {
- resp, err := client.Get(ctx, pfx, getOpts...)
- if err != nil {
- return nil, err
- }
- if len(resp.Kvs) == 0 {
- return resp.Header, nil
- }
- lastKey := string(resp.Kvs[0].Key)
- if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
- return nil, err
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
deleted file mode 100644
index 77b3582..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "fmt"
- "sync"
-
- v3 "github.com/coreos/etcd/clientv3"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-// Mutex implements the sync Locker interface with etcd
-type Mutex struct {
- s *Session
-
- pfx string
- myKey string
- myRev int64
- hdr *pb.ResponseHeader
-}
-
-func NewMutex(s *Session, pfx string) *Mutex {
- return &Mutex{s, pfx + "/", "", -1, nil}
-}
-
-// Lock locks the mutex with a cancelable context. If the context is canceled
-// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
-func (m *Mutex) Lock(ctx context.Context) error {
- s := m.s
- client := m.s.Client()
-
- m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
- cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
- // put self in lock waiters via myKey; oldest waiter holds lock
- put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
- // reuse key in case this session already holds the lock
- get := v3.OpGet(m.myKey)
- // fetch current holder to complete uncontended path with only one RPC
- getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
- resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
- if err != nil {
- return err
- }
- m.myRev = resp.Header.Revision
- if !resp.Succeeded {
- m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
- }
-	// if there is no key on the prefix, or the key with the minimum create revision is ours, we already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
-
- // wait for deletion revisions prior to myKey
- hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
- // release lock key if wait failed
- if werr != nil {
- m.Unlock(client.Ctx())
- } else {
- m.hdr = hdr
- }
- return werr
-}
-
-func (m *Mutex) Unlock(ctx context.Context) error {
- client := m.s.Client()
- if _, err := client.Delete(ctx, m.myKey); err != nil {
- return err
- }
- m.myKey = "\x00"
- m.myRev = -1
- return nil
-}
-
-func (m *Mutex) IsOwner() v3.Cmp {
- return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
-}
-
-func (m *Mutex) Key() string { return m.myKey }
-
-// Header is the response header received from etcd on acquiring the lock.
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
-
-type lockerMutex struct{ *Mutex }
-
-func (lm *lockerMutex) Lock() {
- client := lm.s.Client()
- if err := lm.Mutex.Lock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-func (lm *lockerMutex) Unlock() {
- client := lm.s.Client()
- if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-
-// NewLocker creates a sync.Locker backed by an etcd mutex.
-func NewLocker(s *Session, pfx string) sync.Locker {
- return &lockerMutex{NewMutex(s, pfx)}
-}
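Lock and Unlock above implement first-come-first-served locking keyed by the session lease: the oldest waiter on the prefix holds the lock. A short sketch of guarding a critical section (the prefix, TTL, and helper name are illustrative):

```go
package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// withLock runs fn while holding the distributed lock rooted at pfx.
func withLock(cli *clientv3.Client, pfx string, fn func() error) error {
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(15)) // lock is released ~15s after a crash
	if err != nil {
		return err
	}
	defer s.Close()

	m := concurrency.NewMutex(s, pfx)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := m.Lock(ctx); err != nil {
		return err
	}
	defer m.Unlock(context.Background())

	return fn()
}
```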
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
deleted file mode 100644
index c399d64..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "time"
-
- v3 "github.com/coreos/etcd/clientv3"
-)
-
-const defaultSessionTTL = 60
-
-// Session represents a lease kept alive for the lifetime of a client.
-// Fault-tolerant applications may use sessions to reason about liveness.
-type Session struct {
- client *v3.Client
- opts *sessionOptions
- id v3.LeaseID
-
- cancel context.CancelFunc
- donec <-chan struct{}
-}
-
-// NewSession gets the leased session for a client.
-func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
- ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
- for _, opt := range opts {
- opt(ops)
- }
-
- id := ops.leaseID
- if id == v3.NoLease {
- resp, err := client.Grant(ops.ctx, int64(ops.ttl))
- if err != nil {
- return nil, err
- }
- id = v3.LeaseID(resp.ID)
- }
-
- ctx, cancel := context.WithCancel(ops.ctx)
- keepAlive, err := client.KeepAlive(ctx, id)
- if err != nil || keepAlive == nil {
- cancel()
- return nil, err
- }
-
- donec := make(chan struct{})
- s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
-
- // keep the lease alive until client error or cancelled context
- go func() {
- defer close(donec)
- for range keepAlive {
- // eat messages until keep alive channel closes
- }
- }()
-
- return s, nil
-}
-
-// Client is the etcd client that is attached to the session.
-func (s *Session) Client() *v3.Client {
- return s.client
-}
-
-// Lease is the lease ID for keys bound to the session.
-func (s *Session) Lease() v3.LeaseID { return s.id }
-
-// Done returns a channel that closes when the lease is orphaned, expires, or
-// is otherwise no longer being refreshed.
-func (s *Session) Done() <-chan struct{} { return s.donec }
-
-// Orphan ends the refresh for the session lease. This is useful
-// in case the state of the client connection is indeterminate (revoke
-// would fail) or when transferring lease ownership.
-func (s *Session) Orphan() {
- s.cancel()
- <-s.donec
-}
-
-// Close orphans the session and revokes the session lease.
-func (s *Session) Close() error {
- s.Orphan()
- // if revoke takes longer than the ttl, lease is expired anyway
- ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
- _, err := s.client.Revoke(ctx, s.id)
- cancel()
- return err
-}
-
-type sessionOptions struct {
- ttl int
- leaseID v3.LeaseID
- ctx context.Context
-}
-
-// SessionOption configures Session.
-type SessionOption func(*sessionOptions)
-
-// WithTTL configures the session's TTL in seconds.
-// If TTL is <= 0, the default 60 seconds TTL will be used.
-func WithTTL(ttl int) SessionOption {
- return func(so *sessionOptions) {
- if ttl > 0 {
- so.ttl = ttl
- }
- }
-}
-
-// WithLease specifies the existing leaseID to be used for the session.
-// This is useful in a process restart scenario, for example, to reclaim
-// leadership from an election prior to restart.
-func WithLease(leaseID v3.LeaseID) SessionOption {
- return func(so *sessionOptions) {
- so.leaseID = leaseID
- }
-}
-
-// WithContext assigns a context to the session instead of defaulting to
-// using the client context. This is useful for canceling NewSession and
-// Close operations immediately without having to close the client. If the
-// context is canceled before Close() completes, the session's lease will be
-// abandoned and left to expire instead of being revoked.
-func WithContext(ctx context.Context) SessionOption {
- return func(so *sessionOptions) {
- so.ctx = ctx
- }
-}
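A Session is just a lease plus a keep-alive goroutine, so its lifecycle is small. A sketch of the typical grant/use/close flow (the TTL and helper name are illustrative):

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// holdSession grants a session lease, does some work tied to it, and
// then revokes it. Keys written with clientv3.WithLease(s.Lease())
// live only while the session is being refreshed.
func holdSession(cli *clientv3.Client) error {
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	if err != nil {
		return err
	}
	log.Printf("holding lease %x", s.Lease())

	// ... work whose liveness is tied to the lease goes here ...

	return s.Close() // stops the keep-alive and revokes the lease
}
```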
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
deleted file mode 100644
index d11023e..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "math"
-
- v3 "github.com/coreos/etcd/clientv3"
-)
-
-// STM is an interface for software transactional memory.
-type STM interface {
- // Get returns the value for a key and inserts the key in the txn's read set.
- // If Get fails, it aborts the transaction with an error, never returning.
- Get(key ...string) string
- // Put adds a value for a key to the write set.
- Put(key, val string, opts ...v3.OpOption)
- // Rev returns the revision of a key in the read set.
- Rev(key string) int64
- // Del deletes a key.
- Del(key string)
-
- // commit attempts to apply the txn's changes to the server.
- commit() *v3.TxnResponse
- reset()
-}
-
-// Isolation is an enumeration of transactional isolation levels which
-// describes how transactions should interfere and conflict.
-type Isolation int
-
-const (
- // SerializableSnapshot provides serializable isolation and also checks
- // for write conflicts.
- SerializableSnapshot Isolation = iota
-	// Serializable reads within the same transaction attempt return data
-	// from the revision of the first read.
- Serializable
- // RepeatableReads reads within the same transaction attempt always
- // return the same data.
- RepeatableReads
- // ReadCommitted reads keys from any committed revision.
- ReadCommitted
-)
-
-// stmError safely passes STM errors through panic to the STM error channel.
-type stmError struct{ err error }
-
-type stmOptions struct {
- iso Isolation
- ctx context.Context
- prefetch []string
-}
-
-type stmOption func(*stmOptions)
-
-// WithIsolation specifies the transaction isolation level.
-func WithIsolation(lvl Isolation) stmOption {
- return func(so *stmOptions) { so.iso = lvl }
-}
-
-// WithAbortContext specifies the context for permanently aborting the transaction.
-func WithAbortContext(ctx context.Context) stmOption {
- return func(so *stmOptions) { so.ctx = ctx }
-}
-
-// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
-// If an STM transaction will unconditionally fetch a set of keys, prefetching
-// those keys will save the round-trip cost from requesting each key one by one
-// with Get().
-func WithPrefetch(keys ...string) stmOption {
- return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
-}
-
-// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
-func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
- opts := &stmOptions{ctx: c.Ctx()}
- for _, f := range so {
- f(opts)
- }
- if len(opts.prefetch) != 0 {
- f := apply
- apply = func(s STM) error {
- s.Get(opts.prefetch...)
- return f(s)
- }
- }
- return runSTM(mkSTM(c, opts), apply)
-}
-
-func mkSTM(c *v3.Client, opts *stmOptions) STM {
- switch opts.iso {
- case SerializableSnapshot:
- s := &stmSerializable{
- stm: stm{client: c, ctx: opts.ctx},
- prefetch: make(map[string]*v3.GetResponse),
- }
- s.conflicts = func() []v3.Cmp {
- return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
- }
- return s
- case Serializable:
- s := &stmSerializable{
- stm: stm{client: c, ctx: opts.ctx},
- prefetch: make(map[string]*v3.GetResponse),
- }
- s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
- return s
- case RepeatableReads:
- s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
- s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
- return s
- case ReadCommitted:
- s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
- s.conflicts = func() []v3.Cmp { return nil }
- return s
- default:
- panic("unsupported stm")
- }
-}
-
-type stmResponse struct {
- resp *v3.TxnResponse
- err error
-}
-
-func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
- outc := make(chan stmResponse, 1)
- go func() {
- defer func() {
- if r := recover(); r != nil {
- e, ok := r.(stmError)
- if !ok {
- // client apply panicked
- panic(r)
- }
- outc <- stmResponse{nil, e.err}
- }
- }()
- var out stmResponse
- for {
- s.reset()
- if out.err = apply(s); out.err != nil {
- break
- }
- if out.resp = s.commit(); out.resp != nil {
- break
- }
- }
- outc <- out
- }()
- r := <-outc
- return r.resp, r.err
-}
-
-// stm implements repeatable-read software transactional memory over etcd
-type stm struct {
- client *v3.Client
- ctx context.Context
- // rset holds read key values and revisions
- rset readSet
- // wset holds overwritten keys and their values
- wset writeSet
- // getOpts are the opts used for gets
- getOpts []v3.OpOption
- // conflicts computes the current conflicts on the txn
- conflicts func() []v3.Cmp
-}
-
-type stmPut struct {
- val string
- op v3.Op
-}
-
-type readSet map[string]*v3.GetResponse
-
-func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
- for i, resp := range txnresp.Responses {
- rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
- }
-}
-
-// first returns the store revision from the first fetch
-func (rs readSet) first() int64 {
- ret := int64(math.MaxInt64 - 1)
- for _, resp := range rs {
- if rev := resp.Header.Revision; rev < ret {
- ret = rev
- }
- }
- return ret
-}
-
-// cmps guards the txn from updates to read set
-func (rs readSet) cmps() []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(rs))
- for k, rk := range rs {
- cmps = append(cmps, isKeyCurrent(k, rk))
- }
- return cmps
-}
-
-type writeSet map[string]stmPut
-
-func (ws writeSet) get(keys ...string) *stmPut {
- for _, key := range keys {
- if wv, ok := ws[key]; ok {
- return &wv
- }
- }
- return nil
-}
-
-// cmps returns a cmp list testing no writes have happened past rev
-func (ws writeSet) cmps(rev int64) []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(ws))
- for key := range ws {
- cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
- }
- return cmps
-}
-
-// puts is the list of ops for all pending writes
-func (ws writeSet) puts() []v3.Op {
- puts := make([]v3.Op, 0, len(ws))
- for _, v := range ws {
- puts = append(puts, v.op)
- }
- return puts
-}
-
-func (s *stm) Get(keys ...string) string {
- if wv := s.wset.get(keys...); wv != nil {
- return wv.val
- }
- return respToValue(s.fetch(keys...))
-}
-
-func (s *stm) Put(key, val string, opts ...v3.OpOption) {
- s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
-}
-
-func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
-
-func (s *stm) Rev(key string) int64 {
- if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
- return resp.Kvs[0].ModRevision
- }
- return 0
-}
-
-func (s *stm) commit() *v3.TxnResponse {
- txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- if txnresp.Succeeded {
- return txnresp
- }
- return nil
-}
-
-func (s *stm) fetch(keys ...string) *v3.GetResponse {
- if len(keys) == 0 {
- return nil
- }
- ops := make([]v3.Op, len(keys))
- for i, key := range keys {
- if resp, ok := s.rset[key]; ok {
- return resp
- }
- ops[i] = v3.OpGet(key, s.getOpts...)
- }
- txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- s.rset.add(keys, txnresp)
- return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
-}
-
-func (s *stm) reset() {
- s.rset = make(map[string]*v3.GetResponse)
- s.wset = make(map[string]stmPut)
-}
-
-type stmSerializable struct {
- stm
- prefetch map[string]*v3.GetResponse
-}
-
-func (s *stmSerializable) Get(keys ...string) string {
- if wv := s.wset.get(keys...); wv != nil {
- return wv.val
- }
- firstRead := len(s.rset) == 0
- for _, key := range keys {
- if resp, ok := s.prefetch[key]; ok {
- delete(s.prefetch, key)
- s.rset[key] = resp
- }
- }
- resp := s.stm.fetch(keys...)
- if firstRead {
- // txn's base revision is defined by the first read
- s.getOpts = []v3.OpOption{
- v3.WithRev(resp.Header.Revision),
- v3.WithSerializable(),
- }
- }
- return respToValue(resp)
-}
-
-func (s *stmSerializable) Rev(key string) int64 {
- s.Get(key)
- return s.stm.Rev(key)
-}
-
-func (s *stmSerializable) gets() ([]string, []v3.Op) {
- keys := make([]string, 0, len(s.rset))
- ops := make([]v3.Op, 0, len(s.rset))
- for k := range s.rset {
- keys = append(keys, k)
- ops = append(ops, v3.OpGet(k))
- }
- return keys, ops
-}
-
-func (s *stmSerializable) commit() *v3.TxnResponse {
- keys, getops := s.gets()
- txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
- // use Else to prefetch keys in case of conflict to save a round trip
- txnresp, err := txn.Else(getops...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- if txnresp.Succeeded {
- return txnresp
- }
- // load prefetch with Else data
- s.rset.add(keys, txnresp)
- s.prefetch = s.rset
- s.getOpts = nil
- return nil
-}
-
-func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
- if len(r.Kvs) != 0 {
- return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
- }
- return v3.Compare(v3.ModRevision(k), "=", 0)
-}
-
-func respToValue(resp *v3.GetResponse) string {
- if resp == nil || len(resp.Kvs) == 0 {
- return ""
- }
- return string(resp.Kvs[0].Value)
-}
-
-// NewSTMRepeatable is deprecated.
-func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
-}
-
-// NewSTMSerializable is deprecated.
-func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
-}
-
-// NewSTMReadCommitted is deprecated.
-func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
-}
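NewSTM retries the apply function until the conflict comparisons built from the read and write sets succeed. A minimal sketch that atomically moves a value between two keys (key and helper names are illustrative):

```go
package main

import (
	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// transfer atomically moves the value stored under src to dst; NewSTM
// retries the apply function until the guarded transaction commits.
func transfer(cli *clientv3.Client, src, dst string) error {
	_, err := concurrency.NewSTM(cli, func(s concurrency.STM) error {
		v := s.Get(src) // records src in the read set
		s.Put(dst, v)   // staged in the write set until commit
		s.Del(src)
		return nil
	}, concurrency.WithIsolation(concurrency.SerializableSnapshot))
	return err
}
```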
diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go
deleted file mode 100644
index 79d6e2a..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/config.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "crypto/tls"
- "time"
-
- "google.golang.org/grpc"
-)
-
-type Config struct {
- // Endpoints is a list of URLs.
- Endpoints []string `json:"endpoints"`
-
- // AutoSyncInterval is the interval to update endpoints with its latest members.
- // 0 disables auto-sync. By default auto-sync is disabled.
- AutoSyncInterval time.Duration `json:"auto-sync-interval"`
-
- // DialTimeout is the timeout for failing to establish a connection.
- DialTimeout time.Duration `json:"dial-timeout"`
-
- // DialKeepAliveTime is the time after which client pings the server to see if
- // transport is alive.
- DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
-
- // DialKeepAliveTimeout is the time that the client waits for a response for the
- // keep-alive probe. If the response is not received in this time, the connection is closed.
- DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
-
- // MaxCallSendMsgSize is the client-side request send limit in bytes.
- // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
- // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallSendMsgSize int
-
- // MaxCallRecvMsgSize is the client-side response receive limit.
- // If 0, it defaults to "math.MaxInt32", because range response can
- // easily exceed request send limits.
- // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallRecvMsgSize int
-
- // TLS holds the client secure credentials, if any.
- TLS *tls.Config
-
- // Username is a user name for authentication.
- Username string `json:"username"`
-
- // Password is a password for authentication.
- Password string `json:"password"`
-
- // RejectOldCluster when set will refuse to create a client against an outdated cluster.
- RejectOldCluster bool `json:"reject-old-cluster"`
-
- // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
- DialOptions []grpc.DialOption
-
- // Context is the default client context; it can be used to cancel grpc dial out and
- // other operations that do not have an explicit context.
- Context context.Context
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go
deleted file mode 100644
index 717fbe4..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/doc.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package clientv3 implements the official Go etcd client for v3.
-//
-// Create client using `clientv3.New`:
-//
-// // expect dial time-out on ipv4 blackhole
-// _, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"http://254.0.0.1:12345"},
-// DialTimeout: 2 * time.Second
-// })
-//
-// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
-// if err == context.DeadlineExceeded {
-// // handle errors
-// }
-//
-// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
-// if err == grpc.ErrClientConnTimeout {
-// // handle errors
-// }
-//
-// cli, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
-// DialTimeout: 5 * time.Second,
-// })
-// if err != nil {
-// // handle error!
-// }
-// defer cli.Close()
-//
-// Make sure to close the client after using it. If the client is not closed, the
-// connection will have leaky goroutines.
-//
-// To specify a client request timeout, wrap the context with context.WithTimeout:
-//
-// ctx, cancel := context.WithTimeout(context.Background(), timeout)
-// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-// cancel()
-// if err != nil {
-// // handle error!
-// }
-// // use the response
-//
-// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
-// Clients are safe for concurrent use by multiple goroutines.
-//
-// etcd client returns 3 types of errors:
-//
-// 1. context error: canceled or deadline exceeded.
-// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
-// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
-//
-// Here is the example code to handle client errors:
-//
-// resp, err := kvc.Put(ctx, "", "")
-// if err != nil {
-// if err == context.Canceled {
-// // ctx is canceled by another routine
-// } else if err == context.DeadlineExceeded {
-// // ctx is attached with a deadline and it exceeded
-// } else if ev, ok := status.FromError(err); ok {
-// code := ev.Code()
-// if code == codes.DeadlineExceeded {
-// // server-side context might have timed-out first (due to clock skew)
-// // while original client-side context is not timed-out yet
-// }
-// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
-// // process (verr.Errors)
-// } else {
-// // bad cluster endpoints, which are not etcd servers
-// }
-// }
-//
-// go func() { cli.Close() }()
-// _, err := kvc.Get(ctx, "a")
-// if err != nil {
-// if err == context.Canceled {
-// // grpc balancer calls 'Get' with an inflight client.Close
-// } else if err == grpc.ErrClientConnClosing {
-// // grpc balancer calls 'Get' after client.Close.
-// }
-// }
-//
-package clientv3
diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go
deleted file mode 100644
index 5918cba..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go
+++ /dev/null
@@ -1,609 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- healthpb "google.golang.org/grpc/health/grpc_health_v1"
- "google.golang.org/grpc/status"
-)
-
-const (
- minHealthRetryDuration = 3 * time.Second
- unknownService = "unknown service grpc.health.v1.Health"
-)
-
-// ErrNoAddrAvilable is returned by Get() when the balancer does not have
-// any active connection to endpoints at the time.
-// This error is returned only when opts.BlockingWait is true.
-var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
-
-type healthCheckFunc func(ep string) (bool, error)
-
-type notifyMsg int
-
-const (
- notifyReset notifyMsg = iota
- notifyNext
-)
-
-// healthBalancer does the bare minimum to expose multiple eps
-// to the grpc reconnection code path
-type healthBalancer struct {
- // addrs are the client's endpoint addresses for grpc
- addrs []grpc.Address
-
- // eps holds the raw endpoints from the client
- eps []string
-
- // notifyCh notifies grpc of the set of addresses for connecting
- notifyCh chan []grpc.Address
-
- // readyc closes once the first connection is up
- readyc chan struct{}
- readyOnce sync.Once
-
- // healthCheck checks an endpoint's health.
- healthCheck healthCheckFunc
- healthCheckTimeout time.Duration
-
- unhealthyMu sync.RWMutex
- unhealthyHostPorts map[string]time.Time
-
- // mu protects all fields below.
- mu sync.RWMutex
-
- // upc closes when pinAddr transitions from empty to non-empty or the balancer closes.
- upc chan struct{}
-
- // downc closes when grpc calls down() on pinAddr
- downc chan struct{}
-
- // stopc is closed to signal updateNotifyLoop should stop.
- stopc chan struct{}
- stopOnce sync.Once
- wg sync.WaitGroup
-
- // donec closes when all goroutines are exited
- donec chan struct{}
-
- // updateAddrsC notifies updateNotifyLoop to update addrs.
- updateAddrsC chan notifyMsg
-
- // grpc issues TLS cert checks using the string passed into dial so
- // that string must be the host. To recover the full scheme://host URL,
- // have a map from hosts to the original endpoint.
- hostPort2ep map[string]string
-
- // pinAddr is the currently pinned address; set to the empty string on
- // initialization and shutdown.
- pinAddr string
-
- closed bool
-}
-
-func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
- notifyCh := make(chan []grpc.Address)
- addrs := eps2addrs(eps)
- hb := &healthBalancer{
- addrs: addrs,
- eps: eps,
- notifyCh: notifyCh,
- readyc: make(chan struct{}),
- healthCheck: hc,
- unhealthyHostPorts: make(map[string]time.Time),
- upc: make(chan struct{}),
- stopc: make(chan struct{}),
- downc: make(chan struct{}),
- donec: make(chan struct{}),
- updateAddrsC: make(chan notifyMsg),
- hostPort2ep: getHostPort2ep(eps),
- }
- if timeout < minHealthRetryDuration {
- timeout = minHealthRetryDuration
- }
- hb.healthCheckTimeout = timeout
-
- close(hb.downc)
- go hb.updateNotifyLoop()
- hb.wg.Add(1)
- go func() {
- defer hb.wg.Done()
- hb.updateUnhealthy()
- }()
- return hb
-}
-
-func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
-
-func (b *healthBalancer) ConnectNotify() <-chan struct{} {
- b.mu.Lock()
- defer b.mu.Unlock()
- return b.upc
-}
-
-func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
-
-func (b *healthBalancer) endpoint(hostPort string) string {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.hostPort2ep[hostPort]
-}
-
-func (b *healthBalancer) pinned() string {
- b.mu.RLock()
- defer b.mu.RUnlock()
- return b.pinAddr
-}
-
-func (b *healthBalancer) hostPortError(hostPort string, err error) {
- if b.endpoint(hostPort) == "" {
- logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
- return
- }
-
- b.unhealthyMu.Lock()
- b.unhealthyHostPorts[hostPort] = time.Now()
- b.unhealthyMu.Unlock()
- logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
-}
-
-func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
- if b.endpoint(hostPort) == "" {
- logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
- return
- }
-
- b.unhealthyMu.Lock()
- delete(b.unhealthyHostPorts, hostPort)
- b.unhealthyMu.Unlock()
- logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
-}
-
-func (b *healthBalancer) countUnhealthy() (count int) {
- b.unhealthyMu.RLock()
- count = len(b.unhealthyHostPorts)
- b.unhealthyMu.RUnlock()
- return count
-}
-
-func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
- b.unhealthyMu.RLock()
- _, unhealthy = b.unhealthyHostPorts[hostPort]
- b.unhealthyMu.RUnlock()
- return unhealthy
-}
-
-func (b *healthBalancer) cleanupUnhealthy() {
- b.unhealthyMu.Lock()
- for k, v := range b.unhealthyHostPorts {
- if time.Since(v) > b.healthCheckTimeout {
- delete(b.unhealthyHostPorts, k)
- logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
- }
- }
- b.unhealthyMu.Unlock()
-}
-
-func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
- unhealthyCnt := b.countUnhealthy()
-
- b.mu.RLock()
- defer b.mu.RUnlock()
-
- hbAddrs := b.addrs
- if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) {
- liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep))
- for k := range b.hostPort2ep {
- liveHostPorts[k] = struct{}{}
- }
- return hbAddrs, liveHostPorts
- }
-
- addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt)
- liveHostPorts := make(map[string]struct{}, len(addrs))
- for _, addr := range b.addrs {
- if !b.isUnhealthy(addr.Addr) {
- addrs = append(addrs, addr)
- liveHostPorts[addr.Addr] = struct{}{}
- }
- }
- return addrs, liveHostPorts
-}
-
-func (b *healthBalancer) updateUnhealthy() {
- for {
- select {
- case <-time.After(b.healthCheckTimeout):
- b.cleanupUnhealthy()
- pinned := b.pinned()
- if pinned == "" || b.isUnhealthy(pinned) {
- select {
- case b.updateAddrsC <- notifyNext:
- case <-b.stopc:
- return
- }
- }
- case <-b.stopc:
- return
- }
- }
-}
-
-func (b *healthBalancer) updateAddrs(eps ...string) {
- np := getHostPort2ep(eps)
-
- b.mu.Lock()
- defer b.mu.Unlock()
-
- match := len(np) == len(b.hostPort2ep)
- if match {
- for k, v := range np {
- if b.hostPort2ep[k] != v {
- match = false
- break
- }
- }
- }
- if match {
- // same endpoints, so no need to update address
- return
- }
-
- b.hostPort2ep = np
- b.addrs, b.eps = eps2addrs(eps), eps
-
- b.unhealthyMu.Lock()
- b.unhealthyHostPorts = make(map[string]time.Time)
- b.unhealthyMu.Unlock()
-}
-
-func (b *healthBalancer) next() {
- b.mu.RLock()
- downc := b.downc
- b.mu.RUnlock()
- select {
- case b.updateAddrsC <- notifyNext:
- case <-b.stopc:
- }
- // wait until disconnect so new RPCs are not issued on old connection
- select {
- case <-downc:
- case <-b.stopc:
- }
-}
-
-func (b *healthBalancer) updateNotifyLoop() {
- defer close(b.donec)
-
- for {
- b.mu.RLock()
- upc, downc, addr := b.upc, b.downc, b.pinAddr
- b.mu.RUnlock()
- // downc or upc should be closed
- select {
- case <-downc:
- downc = nil
- default:
- }
- select {
- case <-upc:
- upc = nil
- default:
- }
- switch {
- case downc == nil && upc == nil:
- // stale
- select {
- case <-b.stopc:
- return
- default:
- }
- case downc == nil:
- b.notifyAddrs(notifyReset)
- select {
- case <-upc:
- case msg := <-b.updateAddrsC:
- b.notifyAddrs(msg)
- case <-b.stopc:
- return
- }
- case upc == nil:
- select {
- // close connections that are not the pinned address
- case b.notifyCh <- []grpc.Address{{Addr: addr}}:
- case <-downc:
- case <-b.stopc:
- return
- }
- select {
- case <-downc:
- b.notifyAddrs(notifyReset)
- case msg := <-b.updateAddrsC:
- b.notifyAddrs(msg)
- case <-b.stopc:
- return
- }
- }
- }
-}
-
-func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
- if msg == notifyNext {
- select {
- case b.notifyCh <- []grpc.Address{}:
- case <-b.stopc:
- return
- }
- }
- b.mu.RLock()
- pinAddr := b.pinAddr
- downc := b.downc
- b.mu.RUnlock()
- addrs, hostPorts := b.liveAddrs()
-
- var waitDown bool
- if pinAddr != "" {
- _, ok := hostPorts[pinAddr]
- waitDown = !ok
- }
-
- select {
- case b.notifyCh <- addrs:
- if waitDown {
- select {
- case <-downc:
- case <-b.stopc:
- }
- }
- case <-b.stopc:
- }
-}
-
-func (b *healthBalancer) Up(addr grpc.Address) func(error) {
- if !b.mayPin(addr) {
- return func(err error) {}
- }
-
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // gRPC might call Up after it called Close. We add this check
- // to "fix" it up at application layer. Otherwise, will panic
- // if b.upc is already closed.
- if b.closed {
- return func(err error) {}
- }
-
- // gRPC might call Up on a stale address.
- // Prevent updating pinAddr with a stale address.
- if !hasAddr(b.addrs, addr.Addr) {
- return func(err error) {}
- }
-
- if b.pinAddr != "" {
- logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
- return func(err error) {}
- }
-
- // notify waiting Get()s and pin first connected address
- close(b.upc)
- b.downc = make(chan struct{})
- b.pinAddr = addr.Addr
- logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)
-
- // notify client that a connection is up
- b.readyOnce.Do(func() { close(b.readyc) })
-
- return func(err error) {
-		// If connected to a black hole endpoint or a killed server, the gRPC ping
-		// timeout will induce a network I/O error and gRPC will retry until success;
-		// finding a healthy endpoint on retry could take several timeouts and redials.
-		// To avoid wasting retries, gray-list unhealthy endpoints.
- b.hostPortError(addr.Addr, err)
-
- b.mu.Lock()
- b.upc = make(chan struct{})
- close(b.downc)
- b.pinAddr = ""
- b.mu.Unlock()
- logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
- }
-}
-
-func (b *healthBalancer) mayPin(addr grpc.Address) bool {
- if b.endpoint(addr.Addr) == "" { // stale host:port
- return false
- }
-
- b.unhealthyMu.RLock()
- unhealthyCnt := len(b.unhealthyHostPorts)
- failedTime, bad := b.unhealthyHostPorts[addr.Addr]
- b.unhealthyMu.RUnlock()
-
- b.mu.RLock()
- skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt
- b.mu.RUnlock()
- if skip || !bad {
- return true
- }
-
- // prevent isolated member's endpoint from being infinitely retried, as follows:
- // 1. keepalive pings detects GoAway with http2.ErrCodeEnhanceYourCalm
- // 2. balancer 'Up' unpins with grpc: failed with network I/O error
- // 3. grpc-healthcheck still SERVING, thus retry to pin
- // instead, return before grpc-healthcheck if failed within healthcheck timeout
- if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
- logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
- return false
- }
-
- if ok, _ := b.healthCheck(addr.Addr); ok {
- b.removeUnhealthy(addr.Addr, "health check success")
- return true
- }
-
- b.hostPortError(addr.Addr, errors.New("health check failed"))
- return false
-}
-
-func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
- var (
- addr string
- closed bool
- )
-
- // If opts.BlockingWait is false (for fail-fast RPCs), it should return
- // an address it has notified via Notify immediately instead of blocking.
- if !opts.BlockingWait {
- b.mu.RLock()
- closed = b.closed
- addr = b.pinAddr
- b.mu.RUnlock()
- if closed {
- return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
- }
- if addr == "" {
- return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable
- }
- return grpc.Address{Addr: addr}, func() {}, nil
- }
-
- for {
- b.mu.RLock()
- ch := b.upc
- b.mu.RUnlock()
- select {
- case <-ch:
- case <-b.donec:
- return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
- case <-ctx.Done():
- return grpc.Address{Addr: ""}, nil, ctx.Err()
- }
- b.mu.RLock()
- closed = b.closed
- addr = b.pinAddr
- b.mu.RUnlock()
- // Close() which sets b.closed = true can be called before Get(), Get() must exit if balancer is closed.
- if closed {
- return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
- }
- if addr != "" {
- break
- }
- }
- return grpc.Address{Addr: addr}, func() {}, nil
-}
-
-func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
-
-func (b *healthBalancer) Close() error {
- b.mu.Lock()
-	// In case gRPC calls close twice. TODO: remove this check
-	// when we are sure that gRPC won't call close twice.
- if b.closed {
- b.mu.Unlock()
- <-b.donec
- return nil
- }
- b.closed = true
- b.stopOnce.Do(func() { close(b.stopc) })
- b.pinAddr = ""
-
-	// In the following scenario:
- // 1. upc is not closed; no pinned address
- // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks
- // 3. client.conn.Close() calls balancer.Close(); closed = true
- // 4. for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled
- // we must close upc so Get() exits from blocking on upc
- select {
- case <-b.upc:
- default:
- // terminate all waiting Get()s
- close(b.upc)
- }
-
- b.mu.Unlock()
- b.wg.Wait()
-
- // wait for updateNotifyLoop to finish
- <-b.donec
- close(b.notifyCh)
-
- return nil
-}
-
-func grpcHealthCheck(client *Client, ep string) (bool, error) {
- conn, err := client.dial(ep)
- if err != nil {
- return false, err
- }
- defer conn.Close()
- cli := healthpb.NewHealthClient(conn)
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{})
- cancel()
- if err != nil {
- if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable {
- if s.Message() == unknownService { // etcd < v3.3.0
- return true, nil
- }
- }
- return false, err
- }
- return resp.Status == healthpb.HealthCheckResponse_SERVING, nil
-}
-
-func hasAddr(addrs []grpc.Address, targetAddr string) bool {
- for _, addr := range addrs {
- if targetAddr == addr.Addr {
- return true
- }
- }
- return false
-}
-
-func getHost(ep string) string {
- url, uerr := url.Parse(ep)
- if uerr != nil || !strings.Contains(ep, "://") {
- return ep
- }
- return url.Host
-}
-
-func eps2addrs(eps []string) []grpc.Address {
- addrs := make([]grpc.Address, len(eps))
- for i := range eps {
- addrs[i].Addr = getHost(eps[i])
- }
- return addrs
-}
-
-func getHostPort2ep(eps []string) map[string]string {
- hm := make(map[string]string, len(eps))
- for i := range eps {
- _, host, _ := parseEndpoint(eps[i])
- hm[host] = eps[i]
- }
- return hm
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go
deleted file mode 100644
index 5a7469b..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/kv.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- CompactResponse pb.CompactionResponse
- PutResponse pb.PutResponse
- GetResponse pb.RangeResponse
- DeleteResponse pb.DeleteRangeResponse
- TxnResponse pb.TxnResponse
-)
-
-type KV interface {
- // Put puts a key-value pair into etcd.
-	// Note that key and value can be plain byte arrays, and string is
-	// an immutable representation of that byte array.
-	// To get a string of bytes, do string([]byte{0x10, 0x20}).
- Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
-
- // Get retrieves keys.
- // By default, Get will return the value for "key", if any.
- // When passed WithRange(end), Get will return the keys in the range [key, end).
- // When passed WithFromKey(), Get returns keys greater than or equal to key.
- // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
- // if the required revision is compacted, the request will fail with ErrCompacted .
- // When passed WithLimit(limit), the number of returned keys is bounded by limit.
- // When passed WithSort(), the keys will be sorted.
- Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
-
- // Delete deletes a key, or optionally using WithRange(end), [key, end).
- Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
-
- // Compact compacts etcd KV history before the given rev.
- Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
-
- // Do applies a single Op on KV without a transaction.
- // Do is useful when creating arbitrary operations to be issued at a
- // later time; the user can range over the operations, calling Do to
- // execute them. Get/Put/Delete, on the other hand, are best suited
- // for when the operation should be issued at the time of declaration.
- Do(ctx context.Context, op Op) (OpResponse, error)
-
- // Txn creates a transaction.
- Txn(ctx context.Context) Txn
-}
-
-type OpResponse struct {
- put *PutResponse
- get *GetResponse
- del *DeleteResponse
- txn *TxnResponse
-}
-
-func (op OpResponse) Put() *PutResponse { return op.put }
-func (op OpResponse) Get() *GetResponse { return op.get }
-func (op OpResponse) Del() *DeleteResponse { return op.del }
-func (op OpResponse) Txn() *TxnResponse { return op.txn }
-
-func (resp *PutResponse) OpResponse() OpResponse {
- return OpResponse{put: resp}
-}
-func (resp *GetResponse) OpResponse() OpResponse {
- return OpResponse{get: resp}
-}
-func (resp *DeleteResponse) OpResponse() OpResponse {
- return OpResponse{del: resp}
-}
-func (resp *TxnResponse) OpResponse() OpResponse {
- return OpResponse{txn: resp}
-}
-
-type kv struct {
- remote pb.KVClient
- callOpts []grpc.CallOption
-}
-
-func NewKV(c *Client) KV {
- api := &kv{remote: RetryKVClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
- api := &kv{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
- r, err := kv.Do(ctx, OpPut(key, val, opts...))
- return r.put, toErr(ctx, err)
-}
-
-func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
- r, err := kv.Do(ctx, OpGet(key, opts...))
- return r.get, toErr(ctx, err)
-}
-
-func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
- r, err := kv.Do(ctx, OpDelete(key, opts...))
- return r.del, toErr(ctx, err)
-}
-
-func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
- resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*CompactResponse)(resp), err
-}
-
-func (kv *kv) Txn(ctx context.Context) Txn {
- return &txn{
- kv: kv,
- ctx: ctx,
- callOpts: kv.callOpts,
- }
-}
-
-func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
- var err error
- switch op.t {
- case tRange:
- var resp *pb.RangeResponse
- resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{get: (*GetResponse)(resp)}, nil
- }
- case tPut:
- var resp *pb.PutResponse
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{put: (*PutResponse)(resp)}, nil
- }
- case tDeleteRange:
- var resp *pb.DeleteRangeResponse
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{del: (*DeleteResponse)(resp)}, nil
- }
- case tTxn:
- var resp *pb.TxnResponse
- resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{txn: (*TxnResponse)(resp)}, nil
- }
- default:
- panic("Unknown op")
- }
- return OpResponse{}, toErr(ctx, err)
-}
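Since the full clientv3 KV implementation above is being dropped from the vendor directory, a brief usage sketch is kept here for reference. This is a hedged example, not part of the change: the endpoint address, key, and value are placeholders, and it assumes an etcd server is reachable.
```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	// Placeholder endpoint; adjust for a real deployment.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Put, Get, and Delete map directly onto the KV interface removed above.
	if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
		panic(err)
	}
	getResp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		panic(err)
	}
	for _, kv := range getResp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
	if _, err := cli.Delete(ctx, "sample_key"); err != nil {
		panic(err)
	}
}
```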
diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go
deleted file mode 100644
index 3729cf3..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/lease.go
+++ /dev/null
@@ -1,588 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
-)
-
-type (
- LeaseRevokeResponse pb.LeaseRevokeResponse
- LeaseID int64
-)
-
-// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
-type LeaseGrantResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
- Error string
-}
-
-// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
-type LeaseKeepAliveResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
-}
-
-// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
-type LeaseTimeToLiveResponse struct {
- *pb.ResponseHeader
- ID LeaseID `json:"id"`
-
- // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
- TTL int64 `json:"ttl"`
-
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- GrantedTTL int64 `json:"granted-ttl"`
-
- // Keys is the list of keys attached to this lease.
- Keys [][]byte `json:"keys"`
-}
-
-// LeaseStatus represents a lease status.
-type LeaseStatus struct {
- ID LeaseID `json:"id"`
- // TODO: TTL int64
-}
-
-// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
-type LeaseLeasesResponse struct {
- *pb.ResponseHeader
- Leases []LeaseStatus `json:"leases"`
-}
-
-const (
- // defaultTTL is the assumed lease TTL used for the first keepalive
- // deadline before the actual TTL is known to the client.
- defaultTTL = 5 * time.Second
- // NoLease is a lease ID for the absence of a lease.
- NoLease LeaseID = 0
-
- // retryConnWait is how long to wait before retrying request due to an error
- retryConnWait = 500 * time.Millisecond
-)
-
-// LeaseResponseChSize is the size of buffer to store unsent lease responses.
-// WARNING: DO NOT UPDATE.
-// Only for testing purposes.
-var LeaseResponseChSize = 16
-
-// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
-//
-// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
-type ErrKeepAliveHalted struct {
- Reason error
-}
-
-func (e ErrKeepAliveHalted) Error() string {
- s := "etcdclient: leases keep alive halted"
- if e.Reason != nil {
- s += ": " + e.Reason.Error()
- }
- return s
-}
-
-type Lease interface {
- // Grant creates a new lease.
- Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
-
- // Revoke revokes the given lease.
- Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
-
- // TimeToLive retrieves the lease information of the given lease ID.
- TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
-
- // Leases retrieves all leases.
- Leases(ctx context.Context) (*LeaseLeasesResponse, error)
-
-	// KeepAlive keeps the given lease alive forever. If the keepalive response
-	// posted to the channel is not consumed immediately, the lease client will
-	// continue sending keep alive requests to the etcd server at least every
-	// second until the latest response is consumed.
-	//
-	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
-	// alive stream is interrupted in some way the client cannot handle itself, or
-	// if the given context "ctx" is canceled or times out. A "LeaseKeepAliveResponse"
-	// received from this closed channel is nil.
- //
- // If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
- // no leader") or canceled by the caller (e.g. context.Canceled), the error
- // is returned. Otherwise, it retries.
- //
- // TODO(v4.0): post errors to last keep alive message before closing
- // (see https://github.com/coreos/etcd/pull/7866)
- KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
-
- // KeepAliveOnce renews the lease once. The response corresponds to the
- // first message from calling KeepAlive. If the response has a recoverable
- // error, KeepAliveOnce will retry the RPC with a new keep alive message.
- //
- // In most of the cases, Keepalive should be used instead of KeepAliveOnce.
- KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
-
- // Close releases all resources Lease keeps for efficient communication
- // with the etcd server.
- Close() error
-}
-
-type lessor struct {
- mu sync.Mutex // guards all fields
-
- // donec is closed and loopErr is set when recvKeepAliveLoop stops
- donec chan struct{}
- loopErr error
-
- remote pb.LeaseClient
-
- stream pb.Lease_LeaseKeepAliveClient
- streamCancel context.CancelFunc
-
- stopCtx context.Context
- stopCancel context.CancelFunc
-
- keepAlives map[LeaseID]*keepAlive
-
- // firstKeepAliveTimeout is the timeout for the first keepalive request
- // before the actual TTL is known to the lease client
- firstKeepAliveTimeout time.Duration
-
- // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
- firstKeepAliveOnce sync.Once
-
- callOpts []grpc.CallOption
-}
-
-// keepAlive multiplexes a keepalive for a lease over multiple channels
-type keepAlive struct {
- chs []chan<- *LeaseKeepAliveResponse
- ctxs []context.Context
- // deadline is the time the keep alive channels close if no response
- deadline time.Time
- // nextKeepAlive is when to send the next keep alive message
- nextKeepAlive time.Time
- // donec is closed on lease revoke, expiration, or cancel.
- donec chan struct{}
-}
-
-func NewLease(c *Client) Lease {
- return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
-}
-
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
- l := &lessor{
- donec: make(chan struct{}),
- keepAlives: make(map[LeaseID]*keepAlive),
- remote: remote,
- firstKeepAliveTimeout: keepAliveTimeout,
- }
- if l.firstKeepAliveTimeout == time.Second {
- l.firstKeepAliveTimeout = defaultTTL
- }
- if c != nil {
- l.callOpts = c.callOpts
- }
- reqLeaderCtx := WithRequireLeader(context.Background())
- l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
- return l
-}
-
-func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
- r := &pb.LeaseGrantRequest{TTL: ttl}
- resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseGrantResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- Error: resp.Error,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
- r := &pb.LeaseRevokeRequest{ID: int64(id)}
- resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
- if err == nil {
- return (*LeaseRevokeResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
- r := toLeaseTimeToLiveRequest(id, opts...)
- resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseTimeToLiveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- GrantedTTL: resp.GrantedTTL,
- Keys: resp.Keys,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
- resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
- if err == nil {
- leases := make([]LeaseStatus, len(resp.Leases))
- for i := range resp.Leases {
- leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
- }
- return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
- ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
-
- l.mu.Lock()
- // ensure that recvKeepAliveLoop is still running
- select {
- case <-l.donec:
- err := l.loopErr
- l.mu.Unlock()
- close(ch)
- return ch, ErrKeepAliveHalted{Reason: err}
- default:
- }
- ka, ok := l.keepAlives[id]
- if !ok {
- // create fresh keep alive
- ka = &keepAlive{
- chs: []chan<- *LeaseKeepAliveResponse{ch},
- ctxs: []context.Context{ctx},
- deadline: time.Now().Add(l.firstKeepAliveTimeout),
- nextKeepAlive: time.Now(),
- donec: make(chan struct{}),
- }
- l.keepAlives[id] = ka
- } else {
- // add channel and context to existing keep alive
- ka.ctxs = append(ka.ctxs, ctx)
- ka.chs = append(ka.chs, ch)
- }
- l.mu.Unlock()
-
- go l.keepAliveCtxCloser(id, ctx, ka.donec)
- l.firstKeepAliveOnce.Do(func() {
- go l.recvKeepAliveLoop()
- go l.deadlineLoop()
- })
-
- return ch, nil
-}
-
-func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- for {
- resp, err := l.keepAliveOnce(ctx, id)
- if err == nil {
- if resp.TTL <= 0 {
- err = rpctypes.ErrLeaseNotFound
- }
- return resp, err
- }
- if isHaltErr(ctx, err) {
- return nil, toErr(ctx, err)
- }
- }
-}
-
-func (l *lessor) Close() error {
- l.stopCancel()
- // close for synchronous teardown if stream goroutines never launched
- l.firstKeepAliveOnce.Do(func() { close(l.donec) })
- <-l.donec
- return nil
-}
-
-func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
- select {
- case <-donec:
- return
- case <-l.donec:
- return
- case <-ctx.Done():
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[id]
- if !ok {
- return
- }
-
- // close channel and remove context if still associated with keep alive
- for i, c := range ka.ctxs {
- if c == ctx {
- close(ka.chs[i])
- ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
- ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
- break
- }
- }
-	// remove the keepalive if there are no more listeners
- if len(ka.chs) == 0 {
- delete(l.keepAlives, id)
- }
-}
-
-// closeRequireLeader scans keepAlives for ctxs that have require leader
-// and closes the associated channels.
-func (l *lessor) closeRequireLeader() {
- l.mu.Lock()
- defer l.mu.Unlock()
- for _, ka := range l.keepAlives {
- reqIdxs := 0
- // find all required leader channels, close, mark as nil
- for i, ctx := range ka.ctxs {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- continue
- }
- ks := md[rpctypes.MetadataRequireLeaderKey]
- if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
- continue
- }
- close(ka.chs[i])
- ka.chs[i] = nil
- reqIdxs++
- }
- if reqIdxs == 0 {
- continue
- }
- // remove all channels that required a leader from keepalive
- newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
- newCtxs := make([]context.Context, len(newChs))
- newIdx := 0
- for i := range ka.chs {
- if ka.chs[i] == nil {
- continue
- }
-			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
- newIdx++
- }
- ka.chs, ka.ctxs = newChs, newCtxs
- }
-}
-
-func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- resp, rerr := stream.Recv()
- if rerr != nil {
- return nil, toErr(ctx, rerr)
- }
-
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
- return karesp, nil
-}
-
-func (l *lessor) recvKeepAliveLoop() (gerr error) {
- defer func() {
- l.mu.Lock()
- close(l.donec)
- l.loopErr = gerr
- for _, ka := range l.keepAlives {
- ka.close()
- }
- l.keepAlives = make(map[LeaseID]*keepAlive)
- l.mu.Unlock()
- }()
-
- for {
- stream, err := l.resetRecv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
- } else {
- for {
- resp, err := stream.Recv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
-
- if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
- l.closeRequireLeader()
- }
- break
- }
-
- l.recvKeepAlive(resp)
- }
- }
-
- select {
- case <-time.After(retryConnWait):
- continue
- case <-l.stopCtx.Done():
- return l.stopCtx.Err()
- }
- }
-}
-
-// resetRecv opens a new lease stream and starts sending keep alive requests.
-func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
- sctx, cancel := context.WithCancel(l.stopCtx)
- stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
- if err != nil {
- cancel()
- return nil, err
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.stream != nil && l.streamCancel != nil {
- l.streamCancel()
- }
-
- l.streamCancel = cancel
- l.stream = stream
-
- go l.sendKeepAliveLoop(stream)
- return stream, nil
-}
-
-// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
-func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[karesp.ID]
- if !ok {
- return
- }
-
- if karesp.TTL <= 0 {
- // lease expired; close all keep alive channels
- delete(l.keepAlives, karesp.ID)
- ka.close()
- return
- }
-
- // send update to all channels
- nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
- ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
- for _, ch := range ka.chs {
- select {
- case ch <- karesp:
- default:
- }
- // still advance in order to rate-limit keep-alive sends
- ka.nextKeepAlive = nextKeepAlive
- }
-}
-
-// deadlineLoop reaps any keep alive channels that have not received a response
-// within the lease TTL
-func (l *lessor) deadlineLoop() {
- for {
- select {
- case <-time.After(time.Second):
- case <-l.donec:
- return
- }
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.deadline.Before(now) {
- // waited too long for response; lease may be expired
- ka.close()
- delete(l.keepAlives, id)
- }
- }
- l.mu.Unlock()
- }
-}
-
-// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
- for {
- var tosend []LeaseID
-
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.nextKeepAlive.Before(now) {
- tosend = append(tosend, id)
- }
- }
- l.mu.Unlock()
-
- for _, id := range tosend {
- r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
- if err := stream.Send(r); err != nil {
- // TODO do something with this error?
- return
- }
- }
-
- select {
- case <-time.After(500 * time.Millisecond):
- case <-stream.Context().Done():
- return
- case <-l.donec:
- return
- case <-l.stopCtx.Done():
- return
- }
- }
-}
-
-func (ka *keepAlive) close() {
- close(ka.donec)
- for _, ch := range ka.chs {
- close(ch)
- }
-}
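A short, hedged sketch of the Grant/KeepAlive flow documented by the Lease interface above. It is not part of this change: `cli` is assumed to be built with clientv3.New as in the previous sketch, and the TTL and key name are placeholders.
```
package etcdsketch

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// leaseExample is a sketch only; it assumes cli was created with clientv3.New.
func leaseExample(ctx context.Context, cli *clientv3.Client) error {
	// Grant a 10-second lease and attach a key to it.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "leased_key", "value", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}

	// KeepAlive renews the lease until ctx is canceled or the stream breaks;
	// responses should be consumed from the channel (see the KeepAlive doc above).
	kaCh, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	for ka := range kaCh {
		fmt.Println("lease renewed, remaining TTL:", ka.TTL)
	}
	return ctx.Err()
}
```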
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/cache.go b/vendor/github.com/coreos/etcd/clientv3/leasing/cache.go
deleted file mode 100644
index 6903a78..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/leasing/cache.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "context"
- "strings"
- "sync"
- "time"
-
- v3 "github.com/coreos/etcd/clientv3"
- v3pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-const revokeBackoff = 2 * time.Second
-
-type leaseCache struct {
- mu sync.RWMutex
- entries map[string]*leaseKey
- revokes map[string]time.Time
- header *v3pb.ResponseHeader
-}
-
-type leaseKey struct {
- response *v3.GetResponse
- // rev is the leasing key revision.
- rev int64
- waitc chan struct{}
-}
-
-func (lc *leaseCache) Rev(key string) int64 {
- lc.mu.RLock()
- defer lc.mu.RUnlock()
- if li := lc.entries[key]; li != nil {
- return li.rev
- }
- return 0
-}
-
-func (lc *leaseCache) Lock(key string) (chan<- struct{}, int64) {
- lc.mu.Lock()
- defer lc.mu.Unlock()
- if li := lc.entries[key]; li != nil {
- li.waitc = make(chan struct{})
- return li.waitc, li.rev
- }
- return nil, 0
-}
-
-func (lc *leaseCache) LockRange(begin, end string) (ret []chan<- struct{}) {
- lc.mu.Lock()
- defer lc.mu.Unlock()
- for k, li := range lc.entries {
- if inRange(k, begin, end) {
- li.waitc = make(chan struct{})
- ret = append(ret, li.waitc)
- }
- }
- return ret
-}
-
-func inRange(k, begin, end string) bool {
- if strings.Compare(k, begin) < 0 {
- return false
- }
- if end != "\x00" && strings.Compare(k, end) >= 0 {
- return false
- }
- return true
-}
-
-func (lc *leaseCache) LockWriteOps(ops []v3.Op) (ret []chan<- struct{}) {
- for _, op := range ops {
- if op.IsGet() {
- continue
- }
- key := string(op.KeyBytes())
- if end := string(op.RangeBytes()); end == "" {
- if wc, _ := lc.Lock(key); wc != nil {
- ret = append(ret, wc)
- }
- } else {
- for k := range lc.entries {
- if !inRange(k, key, end) {
- continue
- }
- if wc, _ := lc.Lock(k); wc != nil {
- ret = append(ret, wc)
- }
- }
- }
- }
- return ret
-}
-
-func (lc *leaseCache) NotifyOps(ops []v3.Op) (wcs []<-chan struct{}) {
- for _, op := range ops {
- if op.IsGet() {
- if _, wc := lc.notify(string(op.KeyBytes())); wc != nil {
- wcs = append(wcs, wc)
- }
- }
- }
- return wcs
-}
-
-func (lc *leaseCache) MayAcquire(key string) bool {
- lc.mu.RLock()
- lr, ok := lc.revokes[key]
- lc.mu.RUnlock()
- return !ok || time.Since(lr) > revokeBackoff
-}
-
-func (lc *leaseCache) Add(key string, resp *v3.GetResponse, op v3.Op) *v3.GetResponse {
- lk := &leaseKey{resp, resp.Header.Revision, closedCh}
- lc.mu.Lock()
- if lc.header == nil || lc.header.Revision < resp.Header.Revision {
- lc.header = resp.Header
- }
- lc.entries[key] = lk
- ret := lk.get(op)
- lc.mu.Unlock()
- return ret
-}
-
-func (lc *leaseCache) Update(key, val []byte, respHeader *v3pb.ResponseHeader) {
- li := lc.entries[string(key)]
- if li == nil {
- return
- }
- cacheResp := li.response
- if len(cacheResp.Kvs) == 0 {
- kv := &mvccpb.KeyValue{
- Key: key,
- CreateRevision: respHeader.Revision,
- }
- cacheResp.Kvs = append(cacheResp.Kvs, kv)
- cacheResp.Count = 1
- }
- cacheResp.Kvs[0].Version++
- if cacheResp.Kvs[0].ModRevision < respHeader.Revision {
- cacheResp.Header = respHeader
- cacheResp.Kvs[0].ModRevision = respHeader.Revision
- cacheResp.Kvs[0].Value = val
- }
-}
-
-func (lc *leaseCache) Delete(key string, hdr *v3pb.ResponseHeader) {
- lc.mu.Lock()
- defer lc.mu.Unlock()
- lc.delete(key, hdr)
-}
-
-func (lc *leaseCache) delete(key string, hdr *v3pb.ResponseHeader) {
- if li := lc.entries[key]; li != nil && hdr.Revision >= li.response.Header.Revision {
- li.response.Kvs = nil
- li.response.Header = copyHeader(hdr)
- }
-}
-
-func (lc *leaseCache) Evict(key string) (rev int64) {
- lc.mu.Lock()
- defer lc.mu.Unlock()
- if li := lc.entries[key]; li != nil {
- rev = li.rev
- delete(lc.entries, key)
- lc.revokes[key] = time.Now()
- }
- return rev
-}
-
-func (lc *leaseCache) EvictRange(key, end string) {
- lc.mu.Lock()
- defer lc.mu.Unlock()
- for k := range lc.entries {
- if inRange(k, key, end) {
- delete(lc.entries, key)
- lc.revokes[key] = time.Now()
- }
- }
-}
-
-func isBadOp(op v3.Op) bool { return op.Rev() > 0 || len(op.RangeBytes()) > 0 }
-
-func (lc *leaseCache) Get(ctx context.Context, op v3.Op) (*v3.GetResponse, bool) {
- if isBadOp(op) {
- return nil, false
- }
- key := string(op.KeyBytes())
- li, wc := lc.notify(key)
- if li == nil {
- return nil, true
- }
- select {
- case <-wc:
- case <-ctx.Done():
- return nil, true
- }
- lc.mu.RLock()
- lk := *li
- ret := lk.get(op)
- lc.mu.RUnlock()
- return ret, true
-}
-
-func (lk *leaseKey) get(op v3.Op) *v3.GetResponse {
- ret := *lk.response
- ret.Header = copyHeader(ret.Header)
- empty := len(ret.Kvs) == 0 || op.IsCountOnly()
- empty = empty || (op.MinModRev() > ret.Kvs[0].ModRevision)
- empty = empty || (op.MaxModRev() != 0 && op.MaxModRev() < ret.Kvs[0].ModRevision)
- empty = empty || (op.MinCreateRev() > ret.Kvs[0].CreateRevision)
- empty = empty || (op.MaxCreateRev() != 0 && op.MaxCreateRev() < ret.Kvs[0].CreateRevision)
- if empty {
- ret.Kvs = nil
- } else {
- kv := *ret.Kvs[0]
- kv.Key = make([]byte, len(kv.Key))
- copy(kv.Key, ret.Kvs[0].Key)
- if !op.IsKeysOnly() {
- kv.Value = make([]byte, len(kv.Value))
- copy(kv.Value, ret.Kvs[0].Value)
- }
- ret.Kvs = []*mvccpb.KeyValue{&kv}
- }
- return &ret
-}
-
-func (lc *leaseCache) notify(key string) (*leaseKey, <-chan struct{}) {
- lc.mu.RLock()
- defer lc.mu.RUnlock()
- if li := lc.entries[key]; li != nil {
- return li, li.waitc
- }
- return nil, nil
-}
-
-func (lc *leaseCache) clearOldRevokes(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case <-time.After(time.Second):
- lc.mu.Lock()
- for k, lr := range lc.revokes {
- if time.Now().Sub(lr.Add(revokeBackoff)) > 0 {
- delete(lc.revokes, k)
- }
- }
- lc.mu.Unlock()
- }
- }
-}
-
-func (lc *leaseCache) evalCmp(cmps []v3.Cmp) (cmpVal bool, ok bool) {
- for _, cmp := range cmps {
- if len(cmp.RangeEnd) > 0 {
- return false, false
- }
- lk := lc.entries[string(cmp.Key)]
- if lk == nil {
- return false, false
- }
- if !evalCmp(lk.response, cmp) {
- return false, true
- }
- }
- return true, true
-}
-
-func (lc *leaseCache) evalOps(ops []v3.Op) ([]*v3pb.ResponseOp, bool) {
- resps := make([]*v3pb.ResponseOp, len(ops))
- for i, op := range ops {
- if !op.IsGet() || isBadOp(op) {
- // TODO: support read-only Txn
- return nil, false
- }
- lk := lc.entries[string(op.KeyBytes())]
- if lk == nil {
- return nil, false
- }
- resp := lk.get(op)
- if resp == nil {
- return nil, false
- }
- resps[i] = &v3pb.ResponseOp{
- Response: &v3pb.ResponseOp_ResponseRange{
- (*v3pb.RangeResponse)(resp),
- },
- }
- }
- return resps, true
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/doc.go b/vendor/github.com/coreos/etcd/clientv3/leasing/doc.go
deleted file mode 100644
index fc97fc8..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/leasing/doc.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package leasing serves linearizable reads from a local cache by acquiring
-// exclusive write access to keys through a client-side leasing protocol. This
-// leasing layer can either directly wrap the etcd client or it can be exposed
-// through the etcd grpc proxy server, granting multiple clients write access.
-//
-// First, create a leasing KV from a clientv3.Client 'cli':
-//
-// lkv, err := leasing.NewKV(cli, "leasing-prefix")
-// if err != nil {
-// // handle error
-// }
-//
-// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's
-// key locally. On the server, the leasing key is stored to "leasing-prefix/abc":
-//
-// resp, err := lkv.Get(context.TODO(), "abc")
-//
-// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime:
-//
-// resp, err = lkv.Get(context.TODO(), "abc")
-//
-// If another leasing client writes to a leased key, then the owner relinquishes its exclusive
-// access, permitting the writer to modify the key:
-//
-// lkv2, err := leasing.NewKV(cli, "leasing-prefix")
-// if err != nil {
-// // handle error
-// }
-// lkv2.Put(context.TODO(), "abc", "456")
-//	resp, err = lkv.Get(context.TODO(), "abc")
-//
-package leasing
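Note that the snippets in the package comment above show a two-value `leasing.NewKV`; the implementation removed below (leasing/kv.go) actually returns a close function as well. A hedged, compilable restatement of the same flow follows; the prefix and key are placeholders and `cli` is assumed to be an existing client.
```
package etcdsketch

import (
	"context"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/leasing"
)

// leasingExample is a sketch only; cli is assumed to be an existing client.
func leasingExample(cli *clientv3.Client) error {
	lkv, closeLKV, err := leasing.NewKV(cli, "leasing-prefix")
	if err != nil {
		return err
	}
	defer closeLKV()

	// The first Get acquires a leasing key under "leasing-prefix/abc" and
	// caches the result locally.
	if _, err := lkv.Get(context.TODO(), "abc"); err != nil {
		return err
	}

	// Later linearizable reads of "abc" are served from the local cache
	// for the lifetime of the lease.
	_, err = lkv.Get(context.TODO(), "abc")
	return err
}
```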
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/kv.go b/vendor/github.com/coreos/etcd/clientv3/leasing/kv.go
deleted file mode 100644
index 5a5e231..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/leasing/kv.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "context"
- "strings"
- "sync"
- "time"
-
- v3 "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/mvccpb"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-type leasingKV struct {
- cl *v3.Client
- kv v3.KV
- pfx string
- leases leaseCache
-
- ctx context.Context
- cancel context.CancelFunc
- wg sync.WaitGroup
-
- sessionOpts []concurrency.SessionOption
- session *concurrency.Session
- sessionc chan struct{}
-}
-
-var closedCh chan struct{}
-
-func init() {
- closedCh = make(chan struct{})
- close(closedCh)
-}
-
-// NewKV wraps a KV instance so that all requests are wired through a leasing protocol.
-func NewKV(cl *v3.Client, pfx string, opts ...concurrency.SessionOption) (v3.KV, func(), error) {
- cctx, cancel := context.WithCancel(cl.Ctx())
- lkv := &leasingKV{
- cl: cl,
- kv: cl.KV,
- pfx: pfx,
- leases: leaseCache{revokes: make(map[string]time.Time)},
- ctx: cctx,
- cancel: cancel,
- sessionOpts: opts,
- sessionc: make(chan struct{}),
- }
- lkv.wg.Add(2)
- go func() {
- defer lkv.wg.Done()
- lkv.monitorSession()
- }()
- go func() {
- defer lkv.wg.Done()
- lkv.leases.clearOldRevokes(cctx)
- }()
- return lkv, lkv.Close, lkv.waitSession(cctx)
-}
-
-func (lkv *leasingKV) Close() {
- lkv.cancel()
- lkv.wg.Wait()
-}
-
-func (lkv *leasingKV) Get(ctx context.Context, key string, opts ...v3.OpOption) (*v3.GetResponse, error) {
- return lkv.get(ctx, v3.OpGet(key, opts...))
-}
-
-func (lkv *leasingKV) Put(ctx context.Context, key, val string, opts ...v3.OpOption) (*v3.PutResponse, error) {
- return lkv.put(ctx, v3.OpPut(key, val, opts...))
-}
-
-func (lkv *leasingKV) Delete(ctx context.Context, key string, opts ...v3.OpOption) (*v3.DeleteResponse, error) {
- return lkv.delete(ctx, v3.OpDelete(key, opts...))
-}
-
-func (lkv *leasingKV) Do(ctx context.Context, op v3.Op) (v3.OpResponse, error) {
- switch {
- case op.IsGet():
- resp, err := lkv.get(ctx, op)
- return resp.OpResponse(), err
- case op.IsPut():
- resp, err := lkv.put(ctx, op)
- return resp.OpResponse(), err
- case op.IsDelete():
- resp, err := lkv.delete(ctx, op)
- return resp.OpResponse(), err
- case op.IsTxn():
- cmps, thenOps, elseOps := op.Txn()
- resp, err := lkv.Txn(ctx).If(cmps...).Then(thenOps...).Else(elseOps...).Commit()
- return resp.OpResponse(), err
- }
- return v3.OpResponse{}, nil
-}
-
-func (lkv *leasingKV) Compact(ctx context.Context, rev int64, opts ...v3.CompactOption) (*v3.CompactResponse, error) {
- return lkv.kv.Compact(ctx, rev, opts...)
-}
-
-func (lkv *leasingKV) Txn(ctx context.Context) v3.Txn {
- return &txnLeasing{Txn: lkv.kv.Txn(ctx), lkv: lkv, ctx: ctx}
-}
-
-func (lkv *leasingKV) monitorSession() {
- for lkv.ctx.Err() == nil {
- if lkv.session != nil {
- select {
- case <-lkv.session.Done():
- case <-lkv.ctx.Done():
- return
- }
- }
- lkv.leases.mu.Lock()
- select {
- case <-lkv.sessionc:
- lkv.sessionc = make(chan struct{})
- default:
- }
- lkv.leases.entries = make(map[string]*leaseKey)
- lkv.leases.mu.Unlock()
-
- s, err := concurrency.NewSession(lkv.cl, lkv.sessionOpts...)
- if err != nil {
- continue
- }
-
- lkv.leases.mu.Lock()
- lkv.session = s
- close(lkv.sessionc)
- lkv.leases.mu.Unlock()
- }
-}
-
-func (lkv *leasingKV) monitorLease(ctx context.Context, key string, rev int64) {
- cctx, cancel := context.WithCancel(lkv.ctx)
- defer cancel()
- for cctx.Err() == nil {
- if rev == 0 {
- resp, err := lkv.kv.Get(ctx, lkv.pfx+key)
- if err != nil {
- continue
- }
- rev = resp.Header.Revision
- if len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) == "REVOKE" {
- lkv.rescind(cctx, key, rev)
- return
- }
- }
- wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
- for resp := range wch {
- for _, ev := range resp.Events {
- if string(ev.Kv.Value) != "REVOKE" {
- continue
- }
- if v3.LeaseID(ev.Kv.Lease) == lkv.leaseID() {
- lkv.rescind(cctx, key, ev.Kv.ModRevision)
- }
- return
- }
- }
- rev = 0
- }
-}
-
-// rescind releases a lease from this client.
-func (lkv *leasingKV) rescind(ctx context.Context, key string, rev int64) {
- if lkv.leases.Evict(key) > rev {
- return
- }
- cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev)
- op := v3.OpDelete(lkv.pfx + key)
- for ctx.Err() == nil {
- if _, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit(); err == nil {
- return
- }
- }
-}
-
-func (lkv *leasingKV) waitRescind(ctx context.Context, key string, rev int64) error {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
- wch := lkv.cl.Watch(cctx, lkv.pfx+key, v3.WithRev(rev+1))
- for resp := range wch {
- for _, ev := range resp.Events {
- if ev.Type == v3.EventTypeDelete {
- return ctx.Err()
- }
- }
- }
- return ctx.Err()
-}
-
-func (lkv *leasingKV) tryModifyOp(ctx context.Context, op v3.Op) (*v3.TxnResponse, chan<- struct{}, error) {
- key := string(op.KeyBytes())
- wc, rev := lkv.leases.Lock(key)
- cmp := v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)
- resp, err := lkv.kv.Txn(ctx).If(cmp).Then(op).Commit()
- switch {
- case err != nil:
- lkv.leases.Evict(key)
- fallthrough
- case !resp.Succeeded:
- if wc != nil {
- close(wc)
- }
- return nil, nil, err
- }
- return resp, wc, nil
-}
-
-func (lkv *leasingKV) put(ctx context.Context, op v3.Op) (pr *v3.PutResponse, err error) {
- if err := lkv.waitSession(ctx); err != nil {
- return nil, err
- }
- for ctx.Err() == nil {
- resp, wc, err := lkv.tryModifyOp(ctx, op)
- if err != nil || wc == nil {
- resp, err = lkv.revoke(ctx, string(op.KeyBytes()), op)
- }
- if err != nil {
- return nil, err
- }
- if resp.Succeeded {
- lkv.leases.mu.Lock()
- lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), resp.Header)
- lkv.leases.mu.Unlock()
- pr = (*v3.PutResponse)(resp.Responses[0].GetResponsePut())
- pr.Header = resp.Header
- }
- if wc != nil {
- close(wc)
- }
- if resp.Succeeded {
- return pr, nil
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) acquire(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
- for ctx.Err() == nil {
- if err := lkv.waitSession(ctx); err != nil {
- return nil, err
- }
- lcmp := v3.Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
- resp, err := lkv.kv.Txn(ctx).If(
- v3.Compare(v3.CreateRevision(lkv.pfx+key), "=", 0),
- v3.Compare(lcmp, "=", 0)).
- Then(
- op,
- v3.OpPut(lkv.pfx+key, "", v3.WithLease(lkv.leaseID()))).
- Else(
- op,
- v3.OpGet(lkv.pfx+key),
- ).Commit()
- if err == nil {
- if !resp.Succeeded {
- kvs := resp.Responses[1].GetResponseRange().Kvs
- // if txn failed since already owner, lease is acquired
- resp.Succeeded = len(kvs) > 0 && v3.LeaseID(kvs[0].Lease) == lkv.leaseID()
- }
- return resp, nil
- }
- // retry if transient error
- if _, ok := err.(rpctypes.EtcdError); ok {
- return nil, err
- }
- if ev, _ := status.FromError(err); ev.Code() != codes.Unavailable {
- return nil, err
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) get(ctx context.Context, op v3.Op) (*v3.GetResponse, error) {
- do := func() (*v3.GetResponse, error) {
- r, err := lkv.kv.Do(ctx, op)
- return r.Get(), err
- }
- if !lkv.readySession() {
- return do()
- }
-
- if resp, ok := lkv.leases.Get(ctx, op); resp != nil {
- return resp, nil
- } else if !ok || op.IsSerializable() {
- // must be handled by server or can skip linearization
- return do()
- }
-
- key := string(op.KeyBytes())
- if !lkv.leases.MayAcquire(key) {
- resp, err := lkv.kv.Do(ctx, op)
- return resp.Get(), err
- }
-
- resp, err := lkv.acquire(ctx, key, v3.OpGet(key))
- if err != nil {
- return nil, err
- }
- getResp := (*v3.GetResponse)(resp.Responses[0].GetResponseRange())
- getResp.Header = resp.Header
- if resp.Succeeded {
- getResp = lkv.leases.Add(key, getResp, op)
- lkv.wg.Add(1)
- go func() {
- defer lkv.wg.Done()
- lkv.monitorLease(ctx, key, resp.Header.Revision)
- }()
- }
- return getResp, nil
-}
-
-func (lkv *leasingKV) deleteRangeRPC(ctx context.Context, maxLeaseRev int64, key, end string) (*v3.DeleteResponse, error) {
- lkey, lend := lkv.pfx+key, lkv.pfx+end
- resp, err := lkv.kv.Txn(ctx).If(
- v3.Compare(v3.CreateRevision(lkey).WithRange(lend), "<", maxLeaseRev+1),
- ).Then(
- v3.OpGet(key, v3.WithRange(end), v3.WithKeysOnly()),
- v3.OpDelete(key, v3.WithRange(end)),
- ).Commit()
- if err != nil {
- lkv.leases.EvictRange(key, end)
- return nil, err
- }
- if !resp.Succeeded {
- return nil, nil
- }
- for _, kv := range resp.Responses[0].GetResponseRange().Kvs {
- lkv.leases.Delete(string(kv.Key), resp.Header)
- }
- delResp := (*v3.DeleteResponse)(resp.Responses[1].GetResponseDeleteRange())
- delResp.Header = resp.Header
- return delResp, nil
-}
-
-func (lkv *leasingKV) deleteRange(ctx context.Context, op v3.Op) (*v3.DeleteResponse, error) {
- key, end := string(op.KeyBytes()), string(op.RangeBytes())
- for ctx.Err() == nil {
- maxLeaseRev, err := lkv.revokeRange(ctx, key, end)
- if err != nil {
- return nil, err
- }
- wcs := lkv.leases.LockRange(key, end)
- delResp, err := lkv.deleteRangeRPC(ctx, maxLeaseRev, key, end)
- closeAll(wcs)
- if err != nil || delResp != nil {
- return delResp, err
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) delete(ctx context.Context, op v3.Op) (dr *v3.DeleteResponse, err error) {
- if err := lkv.waitSession(ctx); err != nil {
- return nil, err
- }
- if len(op.RangeBytes()) > 0 {
- return lkv.deleteRange(ctx, op)
- }
- key := string(op.KeyBytes())
- for ctx.Err() == nil {
- resp, wc, err := lkv.tryModifyOp(ctx, op)
- if err != nil || wc == nil {
- resp, err = lkv.revoke(ctx, key, op)
- }
- if err != nil {
- // don't know if delete was processed
- lkv.leases.Evict(key)
- return nil, err
- }
- if resp.Succeeded {
- dr = (*v3.DeleteResponse)(resp.Responses[0].GetResponseDeleteRange())
- dr.Header = resp.Header
- lkv.leases.Delete(key, dr.Header)
- }
- if wc != nil {
- close(wc)
- }
- if resp.Succeeded {
- return dr, nil
- }
- }
- return nil, ctx.Err()
-}
-
-func (lkv *leasingKV) revoke(ctx context.Context, key string, op v3.Op) (*v3.TxnResponse, error) {
- rev := lkv.leases.Rev(key)
- txn := lkv.kv.Txn(ctx).If(v3.Compare(v3.CreateRevision(lkv.pfx+key), "<", rev+1)).Then(op)
- resp, err := txn.Else(v3.OpPut(lkv.pfx+key, "REVOKE", v3.WithIgnoreLease())).Commit()
- if err != nil || resp.Succeeded {
- return resp, err
- }
- return resp, lkv.waitRescind(ctx, key, resp.Header.Revision)
-}
-
-func (lkv *leasingKV) revokeRange(ctx context.Context, begin, end string) (int64, error) {
- lkey, lend := lkv.pfx+begin, ""
- if len(end) > 0 {
- lend = lkv.pfx + end
- }
- leaseKeys, err := lkv.kv.Get(ctx, lkey, v3.WithRange(lend))
- if err != nil {
- return 0, err
- }
- return lkv.revokeLeaseKvs(ctx, leaseKeys.Kvs)
-}
-
-func (lkv *leasingKV) revokeLeaseKvs(ctx context.Context, kvs []*mvccpb.KeyValue) (int64, error) {
- maxLeaseRev := int64(0)
- for _, kv := range kvs {
- if rev := kv.CreateRevision; rev > maxLeaseRev {
- maxLeaseRev = rev
- }
- if v3.LeaseID(kv.Lease) == lkv.leaseID() {
- // don't revoke own keys
- continue
- }
- key := strings.TrimPrefix(string(kv.Key), lkv.pfx)
- if _, err := lkv.revoke(ctx, key, v3.OpGet(key)); err != nil {
- return 0, err
- }
- }
- return maxLeaseRev, nil
-}
-
-func (lkv *leasingKV) waitSession(ctx context.Context) error {
- lkv.leases.mu.RLock()
- sessionc := lkv.sessionc
- lkv.leases.mu.RUnlock()
- select {
- case <-sessionc:
- return nil
- case <-lkv.ctx.Done():
- return lkv.ctx.Err()
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func (lkv *leasingKV) readySession() bool {
- lkv.leases.mu.RLock()
- defer lkv.leases.mu.RUnlock()
- if lkv.session == nil {
- return false
- }
- select {
- case <-lkv.session.Done():
- default:
- return true
- }
- return false
-}
-
-func (lkv *leasingKV) leaseID() v3.LeaseID {
- lkv.leases.mu.RLock()
- defer lkv.leases.mu.RUnlock()
- return lkv.session.Lease()
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/txn.go b/vendor/github.com/coreos/etcd/clientv3/leasing/txn.go
deleted file mode 100644
index da5b83a..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/leasing/txn.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "context"
- "strings"
-
- v3 "github.com/coreos/etcd/clientv3"
- v3pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type txnLeasing struct {
- v3.Txn
- lkv *leasingKV
- ctx context.Context
- cs []v3.Cmp
- opst []v3.Op
- opse []v3.Op
-}
-
-func (txn *txnLeasing) If(cs ...v3.Cmp) v3.Txn {
- txn.cs = append(txn.cs, cs...)
- txn.Txn = txn.Txn.If(cs...)
- return txn
-}
-
-func (txn *txnLeasing) Then(ops ...v3.Op) v3.Txn {
- txn.opst = append(txn.opst, ops...)
- txn.Txn = txn.Txn.Then(ops...)
- return txn
-}
-
-func (txn *txnLeasing) Else(ops ...v3.Op) v3.Txn {
- txn.opse = append(txn.opse, ops...)
- txn.Txn = txn.Txn.Else(ops...)
- return txn
-}
-
-func (txn *txnLeasing) Commit() (*v3.TxnResponse, error) {
- if resp, err := txn.eval(); resp != nil || err != nil {
- return resp, err
- }
- return txn.serverTxn()
-}
-
-func (txn *txnLeasing) eval() (*v3.TxnResponse, error) {
- // TODO: wait on keys in comparisons
- thenOps, elseOps := gatherOps(txn.opst), gatherOps(txn.opse)
- ops := make([]v3.Op, 0, len(thenOps)+len(elseOps))
- ops = append(ops, thenOps...)
- ops = append(ops, elseOps...)
-
- for _, ch := range txn.lkv.leases.NotifyOps(ops) {
- select {
- case <-ch:
- case <-txn.ctx.Done():
- return nil, txn.ctx.Err()
- }
- }
-
- txn.lkv.leases.mu.RLock()
- defer txn.lkv.leases.mu.RUnlock()
- succeeded, ok := txn.lkv.leases.evalCmp(txn.cs)
- if !ok || txn.lkv.leases.header == nil {
- return nil, nil
- }
- if ops = txn.opst; !succeeded {
- ops = txn.opse
- }
-
- resps, ok := txn.lkv.leases.evalOps(ops)
- if !ok {
- return nil, nil
- }
- return &v3.TxnResponse{copyHeader(txn.lkv.leases.header), succeeded, resps}, nil
-}
-
-// fallback computes the ops to fetch all possible conflicting
-// leasing keys for a list of ops.
-func (txn *txnLeasing) fallback(ops []v3.Op) (fbOps []v3.Op) {
- for _, op := range ops {
- if op.IsGet() {
- continue
- }
- lkey, lend := txn.lkv.pfx+string(op.KeyBytes()), ""
- if len(op.RangeBytes()) > 0 {
- lend = txn.lkv.pfx + string(op.RangeBytes())
- }
- fbOps = append(fbOps, v3.OpGet(lkey, v3.WithRange(lend)))
- }
- return fbOps
-}
-
-func (txn *txnLeasing) guardKeys(ops []v3.Op) (cmps []v3.Cmp) {
- seen := make(map[string]bool)
- for _, op := range ops {
- key := string(op.KeyBytes())
- if op.IsGet() || len(op.RangeBytes()) != 0 || seen[key] {
- continue
- }
- rev := txn.lkv.leases.Rev(key)
- cmps = append(cmps, v3.Compare(v3.CreateRevision(txn.lkv.pfx+key), "<", rev+1))
- seen[key] = true
- }
- return cmps
-}
-
-func (txn *txnLeasing) guardRanges(ops []v3.Op) (cmps []v3.Cmp, err error) {
- for _, op := range ops {
- if op.IsGet() || len(op.RangeBytes()) == 0 {
- continue
- }
-
- key, end := string(op.KeyBytes()), string(op.RangeBytes())
- maxRevLK, err := txn.lkv.revokeRange(txn.ctx, key, end)
- if err != nil {
- return nil, err
- }
-
- opts := append(v3.WithLastRev(), v3.WithRange(end))
- getResp, err := txn.lkv.kv.Get(txn.ctx, key, opts...)
- if err != nil {
- return nil, err
- }
- maxModRev := int64(0)
- if len(getResp.Kvs) > 0 {
- maxModRev = getResp.Kvs[0].ModRevision
- }
-
- noKeyUpdate := v3.Compare(v3.ModRevision(key).WithRange(end), "<", maxModRev+1)
- noLeaseUpdate := v3.Compare(
- v3.CreateRevision(txn.lkv.pfx+key).WithRange(txn.lkv.pfx+end),
- "<",
- maxRevLK+1)
- cmps = append(cmps, noKeyUpdate, noLeaseUpdate)
- }
- return cmps, nil
-}
-
-func (txn *txnLeasing) guard(ops []v3.Op) ([]v3.Cmp, error) {
- cmps := txn.guardKeys(ops)
- rangeCmps, err := txn.guardRanges(ops)
- return append(cmps, rangeCmps...), err
-}
-
-func (txn *txnLeasing) commitToCache(txnResp *v3pb.TxnResponse, userTxn v3.Op) {
- ops := gatherResponseOps(txnResp.Responses, []v3.Op{userTxn})
- txn.lkv.leases.mu.Lock()
- for _, op := range ops {
- key := string(op.KeyBytes())
- if op.IsDelete() && len(op.RangeBytes()) > 0 {
- end := string(op.RangeBytes())
- for k := range txn.lkv.leases.entries {
- if inRange(k, key, end) {
- txn.lkv.leases.delete(k, txnResp.Header)
- }
- }
- } else if op.IsDelete() {
- txn.lkv.leases.delete(key, txnResp.Header)
- }
- if op.IsPut() {
- txn.lkv.leases.Update(op.KeyBytes(), op.ValueBytes(), txnResp.Header)
- }
- }
- txn.lkv.leases.mu.Unlock()
-}
-
-func (txn *txnLeasing) revokeFallback(fbResps []*v3pb.ResponseOp) error {
- for _, resp := range fbResps {
- _, err := txn.lkv.revokeLeaseKvs(txn.ctx, resp.GetResponseRange().Kvs)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (txn *txnLeasing) serverTxn() (*v3.TxnResponse, error) {
- if err := txn.lkv.waitSession(txn.ctx); err != nil {
- return nil, err
- }
-
- userOps := gatherOps(append(txn.opst, txn.opse...))
- userTxn := v3.OpTxn(txn.cs, txn.opst, txn.opse)
- fbOps := txn.fallback(userOps)
-
- defer closeAll(txn.lkv.leases.LockWriteOps(userOps))
- for {
- cmps, err := txn.guard(userOps)
- if err != nil {
- return nil, err
- }
- resp, err := txn.lkv.kv.Txn(txn.ctx).If(cmps...).Then(userTxn).Else(fbOps...).Commit()
- if err != nil {
- for _, cmp := range cmps {
- txn.lkv.leases.Evict(strings.TrimPrefix(string(cmp.Key), txn.lkv.pfx))
- }
- return nil, err
- }
- if resp.Succeeded {
- txn.commitToCache((*v3pb.TxnResponse)(resp), userTxn)
- userResp := resp.Responses[0].GetResponseTxn()
- userResp.Header = resp.Header
- return (*v3.TxnResponse)(userResp), nil
- }
- if err := txn.revokeFallback(resp.Responses); err != nil {
- return nil, err
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/leasing/util.go b/vendor/github.com/coreos/etcd/clientv3/leasing/util.go
deleted file mode 100644
index 61f6e8c..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/leasing/util.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasing
-
-import (
- "bytes"
-
- v3 "github.com/coreos/etcd/clientv3"
- v3pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-func compareInt64(a, b int64) int {
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
-}
-
-func evalCmp(resp *v3.GetResponse, tcmp v3.Cmp) bool {
- var result int
- if len(resp.Kvs) != 0 {
- kv := resp.Kvs[0]
- switch tcmp.Target {
- case v3pb.Compare_VALUE:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Value); tv != nil {
- result = bytes.Compare(kv.Value, tv.Value)
- }
- case v3pb.Compare_CREATE:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_CreateRevision); tv != nil {
- result = compareInt64(kv.CreateRevision, tv.CreateRevision)
- }
- case v3pb.Compare_MOD:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_ModRevision); tv != nil {
- result = compareInt64(kv.ModRevision, tv.ModRevision)
- }
- case v3pb.Compare_VERSION:
- if tv, _ := tcmp.TargetUnion.(*v3pb.Compare_Version); tv != nil {
- result = compareInt64(kv.Version, tv.Version)
- }
- }
- }
- switch tcmp.Result {
- case v3pb.Compare_EQUAL:
- return result == 0
- case v3pb.Compare_NOT_EQUAL:
- return result != 0
- case v3pb.Compare_GREATER:
- return result > 0
- case v3pb.Compare_LESS:
- return result < 0
- }
- return true
-}
-
-func gatherOps(ops []v3.Op) (ret []v3.Op) {
- for _, op := range ops {
- if !op.IsTxn() {
- ret = append(ret, op)
- continue
- }
- _, thenOps, elseOps := op.Txn()
- ret = append(ret, gatherOps(append(thenOps, elseOps...))...)
- }
- return ret
-}
-
-func gatherResponseOps(resp []*v3pb.ResponseOp, ops []v3.Op) (ret []v3.Op) {
- for i, op := range ops {
- if !op.IsTxn() {
- ret = append(ret, op)
- continue
- }
- _, thenOps, elseOps := op.Txn()
- if txnResp := resp[i].GetResponseTxn(); txnResp.Succeeded {
- ret = append(ret, gatherResponseOps(txnResp.Responses, thenOps)...)
- } else {
- ret = append(ret, gatherResponseOps(txnResp.Responses, elseOps)...)
- }
- }
- return ret
-}
-
-func copyHeader(hdr *v3pb.ResponseHeader) *v3pb.ResponseHeader {
- h := *hdr
- return &h
-}
-
-func closeAll(chs []chan<- struct{}) {
- for _, ch := range chs {
- close(ch)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go
deleted file mode 100644
index 782e313..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/logger.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "io/ioutil"
- "sync"
-
- "google.golang.org/grpc/grpclog"
-)
-
-// Logger is the logger used by client library.
-// It implements grpclog.LoggerV2 interface.
-type Logger interface {
- grpclog.LoggerV2
-
-	// Lvl returns the logger if the logger's verbosity level >= "lvl".
-	// Otherwise, it returns a logger that discards all logs.
- Lvl(lvl int) Logger
-
- // to satisfy capnslog
-
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
-}
-
-var (
- loggerMu sync.RWMutex
- logger Logger
-)
-
-type settableLogger struct {
- l grpclog.LoggerV2
- mu sync.RWMutex
-}
-
-func init() {
- // disable client side logs by default
- logger = &settableLogger{}
- SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
-}
-
-// SetLogger sets client-side Logger.
-func SetLogger(l grpclog.LoggerV2) {
- loggerMu.Lock()
- logger = NewLogger(l)
- // override grpclog so that any changes happen with locking
- grpclog.SetLoggerV2(logger)
- loggerMu.Unlock()
-}
-
-// GetLogger returns the current logger.
-func GetLogger() Logger {
- loggerMu.RLock()
- l := logger
- loggerMu.RUnlock()
- return l
-}
-
-// NewLogger returns a new Logger with grpclog.LoggerV2.
-func NewLogger(gl grpclog.LoggerV2) Logger {
- return &settableLogger{l: gl}
-}
-
-func (s *settableLogger) get() grpclog.LoggerV2 {
- s.mu.RLock()
- l := s.l
- s.mu.RUnlock()
- return l
-}
-
-// implement the grpclog.LoggerV2 interface
-
-func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
-func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
-func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
-func (s *settableLogger) Warningf(format string, args ...interface{}) {
- s.get().Warningf(format, args...)
-}
-func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
-func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
-func (s *settableLogger) Errorf(format string, args ...interface{}) {
- s.get().Errorf(format, args...)
-}
-func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
-func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
-func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
-func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
-func (s *settableLogger) V(l int) bool { return s.get().V(l) }
-func (s *settableLogger) Lvl(lvl int) Logger {
- s.mu.RLock()
- l := s.l
- s.mu.RUnlock()
- if l.V(lvl) {
- return s
- }
- return &noLogger{}
-}
-
-type noLogger struct{}
-
-func (*noLogger) Info(args ...interface{}) {}
-func (*noLogger) Infof(format string, args ...interface{}) {}
-func (*noLogger) Infoln(args ...interface{}) {}
-func (*noLogger) Warning(args ...interface{}) {}
-func (*noLogger) Warningf(format string, args ...interface{}) {}
-func (*noLogger) Warningln(args ...interface{}) {}
-func (*noLogger) Error(args ...interface{}) {}
-func (*noLogger) Errorf(format string, args ...interface{}) {}
-func (*noLogger) Errorln(args ...interface{}) {}
-func (*noLogger) Fatal(args ...interface{}) {}
-func (*noLogger) Fatalf(format string, args ...interface{}) {}
-func (*noLogger) Fatalln(args ...interface{}) {}
-func (*noLogger) Print(args ...interface{}) {}
-func (*noLogger) Printf(format string, args ...interface{}) {}
-func (*noLogger) Println(args ...interface{}) {}
-func (*noLogger) V(l int) bool { return false }
-func (ng *noLogger) Lvl(lvl int) Logger { return ng }
diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go
deleted file mode 100644
index f60cfbe..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "io"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- DefragmentResponse pb.DefragmentResponse
- AlarmResponse pb.AlarmResponse
- AlarmMember pb.AlarmMember
- StatusResponse pb.StatusResponse
- HashKVResponse pb.HashKVResponse
- MoveLeaderResponse pb.MoveLeaderResponse
-)
-
-type Maintenance interface {
- // AlarmList gets all active alarms.
- AlarmList(ctx context.Context) (*AlarmResponse, error)
-
- // AlarmDisarm disarms a given alarm.
- AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
-
- // Defragment releases wasted space from internal fragmentation on a given etcd member.
-	// Defragment is only needed when deleting a large number of keys and you want to reclaim
-	// the resources.
-	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
-	// at the same time.
-	// To defragment multiple members in the cluster, the user needs to call Defragment multiple
-	// times with different endpoints.
- Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
-
- // Status gets the status of the endpoint.
- Status(ctx context.Context, endpoint string) (*StatusResponse, error)
-
- // HashKV returns a hash of the KV state at the time of the RPC.
- // If revision is zero, the hash is computed on all keys. If the revision
- // is non-zero, the hash is computed on all keys at or below the given revision.
- HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
-
- // Snapshot provides a reader for a point-in-time snapshot of etcd.
- Snapshot(ctx context.Context) (io.ReadCloser, error)
-
- // MoveLeader requests current leader to transfer its leadership to the transferee.
- // Request must be made to the leader.
- MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
-}
-
-type maintenance struct {
- dial func(endpoint string) (pb.MaintenanceClient, func(), error)
- remote pb.MaintenanceClient
- callOpts []grpc.CallOption
-}
-
-func NewMaintenance(c *Client) Maintenance {
- api := &maintenance{
- dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
- conn, err := c.dial(endpoint)
- if err != nil {
- return nil, nil, err
- }
- cancel := func() { conn.Close() }
- return RetryMaintenanceClient(c, conn), cancel, nil
- },
- remote: RetryMaintenanceClient(c, c.conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
- api := &maintenance{
- dial: func(string) (pb.MaintenanceClient, func(), error) {
- return remote, func() {}, nil
- },
- remote: remote,
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_GET,
- MemberID: 0, // all
- Alarm: pb.AlarmType_NONE, // all
- }
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_DEACTIVATE,
- MemberID: am.MemberID,
- Alarm: am.Alarm,
- }
-
- if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
- ar, err := m.AlarmList(ctx)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- ret := AlarmResponse{}
- for _, am := range ar.Alarms {
- dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
- if derr != nil {
- return nil, toErr(ctx, derr)
- }
- ret.Alarms = append(ret.Alarms, dresp.Alarms...)
- }
- return &ret, nil
- }
-
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*DefragmentResponse)(resp), nil
-}
-
-func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*StatusResponse)(resp), nil
-}
-
-func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*HashKVResponse)(resp), nil
-}
-
-func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
- ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- pr, pw := io.Pipe()
- go func() {
- for {
- resp, err := ss.Recv()
- if err != nil {
- pw.CloseWithError(err)
- return
- }
- if resp == nil && err == nil {
- break
- }
- if _, werr := pw.Write(resp.Blob); werr != nil {
- pw.CloseWithError(werr)
- return
- }
- }
- pw.Close()
- }()
- return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
-}
-
-type snapshotReadCloser struct {
- ctx context.Context
- io.ReadCloser
-}
-
-func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
- n, err = rc.ReadCloser.Read(p)
- return n, toErr(rc.ctx, err)
-}
-
-func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
- resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
- return (*MoveLeaderResponse)(resp), toErr(ctx, err)
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go b/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go
deleted file mode 100644
index 3f88332..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/namespace/doc.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package namespace is a clientv3 wrapper that translates all keys to begin
-// with a given prefix.
-//
-// First, create a client:
-//
-// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
-// if err != nil {
-// // handle error!
-// }
-//
-// Next, override the client interfaces:
-//
-// unprefixedKV := cli.KV
-// cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
-// cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
-// cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
-//
-// Now calls using 'cli' will namespace / prefix all keys with "my-prefix/":
-//
-// cli.Put(context.TODO(), "abc", "123")
-// resp, _ := unprefixedKV.Get(context.TODO(), "my-prefix/abc")
-// fmt.Printf("%s\n", resp.Kvs[0].Value)
-// // Output: 123
-// unprefixedKV.Put(context.TODO(), "my-prefix/abc", "456")
-//	resp, _ = cli.Get(context.TODO(), "abc")
-// fmt.Printf("%s\n", resp.Kvs[0].Value)
-// // Output: 456
-//
-package namespace
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go b/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go
deleted file mode 100644
index 13dd83a..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/namespace/kv.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "context"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type kvPrefix struct {
- clientv3.KV
- pfx string
-}
-
-// NewKV wraps a KV instance so that all requests
-// are prefixed with a given string.
-func NewKV(kv clientv3.KV, prefix string) clientv3.KV {
- return &kvPrefix{kv, prefix}
-}
-
-func (kv *kvPrefix) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
- if len(key) == 0 {
- return nil, rpctypes.ErrEmptyKey
- }
- op := kv.prefixOp(clientv3.OpPut(key, val, opts...))
- r, err := kv.KV.Do(ctx, op)
- if err != nil {
- return nil, err
- }
- put := r.Put()
- kv.unprefixPutResponse(put)
- return put, nil
-}
-
-func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
- if len(key) == 0 {
- return nil, rpctypes.ErrEmptyKey
- }
- r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...)))
- if err != nil {
- return nil, err
- }
- get := r.Get()
- kv.unprefixGetResponse(get)
- return get, nil
-}
-
-func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.DeleteResponse, error) {
- if len(key) == 0 {
- return nil, rpctypes.ErrEmptyKey
- }
- r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpDelete(key, opts...)))
- if err != nil {
- return nil, err
- }
- del := r.Del()
- kv.unprefixDeleteResponse(del)
- return del, nil
-}
-
-func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) {
- if len(op.KeyBytes()) == 0 && !op.IsTxn() {
- return clientv3.OpResponse{}, rpctypes.ErrEmptyKey
- }
- r, err := kv.KV.Do(ctx, kv.prefixOp(op))
- if err != nil {
- return r, err
- }
- switch {
- case r.Get() != nil:
- kv.unprefixGetResponse(r.Get())
- case r.Put() != nil:
- kv.unprefixPutResponse(r.Put())
- case r.Del() != nil:
- kv.unprefixDeleteResponse(r.Del())
- case r.Txn() != nil:
- kv.unprefixTxnResponse(r.Txn())
- }
- return r, nil
-}
-
-type txnPrefix struct {
- clientv3.Txn
- kv *kvPrefix
-}
-
-func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn {
- return &txnPrefix{kv.KV.Txn(ctx), kv}
-}
-
-func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn {
- txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...)
- return txn
-}
-
-func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn {
- txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...)
- return txn
-}
-
-func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn {
- txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...)
- return txn
-}
-
-func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) {
- resp, err := txn.Txn.Commit()
- if err != nil {
- return nil, err
- }
- txn.kv.unprefixTxnResponse(resp)
- return resp, nil
-}
-
-func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op {
- if !op.IsTxn() {
- begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes())
- op.WithKeyBytes(begin)
- op.WithRangeBytes(end)
- return op
- }
- cmps, thenOps, elseOps := op.Txn()
- return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps))
-}
-
-func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) {
- for i := range resp.Kvs {
- resp.Kvs[i].Key = resp.Kvs[i].Key[len(kv.pfx):]
- }
-}
-
-func (kv *kvPrefix) unprefixPutResponse(resp *clientv3.PutResponse) {
- if resp.PrevKv != nil {
- resp.PrevKv.Key = resp.PrevKv.Key[len(kv.pfx):]
- }
-}
-
-func (kv *kvPrefix) unprefixDeleteResponse(resp *clientv3.DeleteResponse) {
- for i := range resp.PrevKvs {
- resp.PrevKvs[i].Key = resp.PrevKvs[i].Key[len(kv.pfx):]
- }
-}
-
-func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) {
- for _, r := range resp.Responses {
- switch tv := r.Response.(type) {
- case *pb.ResponseOp_ResponseRange:
- if tv.ResponseRange != nil {
- kv.unprefixGetResponse((*clientv3.GetResponse)(tv.ResponseRange))
- }
- case *pb.ResponseOp_ResponsePut:
- if tv.ResponsePut != nil {
- kv.unprefixPutResponse((*clientv3.PutResponse)(tv.ResponsePut))
- }
- case *pb.ResponseOp_ResponseDeleteRange:
- if tv.ResponseDeleteRange != nil {
- kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange))
- }
- case *pb.ResponseOp_ResponseTxn:
- if tv.ResponseTxn != nil {
- kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn))
- }
- default:
- }
- }
-}
-
-func (kv *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) {
- return prefixInterval(kv.pfx, key, end)
-}
-
-func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp {
- newCmps := make([]clientv3.Cmp, len(cs))
- for i := range cs {
- newCmps[i] = cs[i]
- pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd)
- newCmps[i].WithKeyBytes(pfxKey)
- if len(cs[i].RangeEnd) != 0 {
- newCmps[i].RangeEnd = endKey
- }
- }
- return newCmps
-}
-
-func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op {
- newOps := make([]clientv3.Op, len(ops))
- for i := range ops {
- newOps[i] = kv.prefixOp(ops[i])
- }
- return newOps
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go b/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go
deleted file mode 100644
index f092106..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/namespace/lease.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "bytes"
- "context"
-
- "github.com/coreos/etcd/clientv3"
-)
-
-type leasePrefix struct {
- clientv3.Lease
- pfx []byte
-}
-
-// NewLease wraps a Lease interface to filter for only keys with a prefix
-// and remove that prefix when fetching attached keys through TimeToLive.
-func NewLease(l clientv3.Lease, prefix string) clientv3.Lease {
- return &leasePrefix{l, []byte(prefix)}
-}
-
-func (l *leasePrefix) TimeToLive(ctx context.Context, id clientv3.LeaseID, opts ...clientv3.LeaseOption) (*clientv3.LeaseTimeToLiveResponse, error) {
- resp, err := l.Lease.TimeToLive(ctx, id, opts...)
- if err != nil {
- return nil, err
- }
- if len(resp.Keys) > 0 {
- var outKeys [][]byte
- for i := range resp.Keys {
- if len(resp.Keys[i]) < len(l.pfx) {
- // too short
- continue
- }
- if !bytes.Equal(resp.Keys[i][:len(l.pfx)], l.pfx) {
- // doesn't match prefix
- continue
- }
- // strip prefix
- outKeys = append(outKeys, resp.Keys[i][len(l.pfx):])
- }
- resp.Keys = outKeys
- }
- return resp, nil
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go b/vendor/github.com/coreos/etcd/clientv3/namespace/util.go
deleted file mode 100644
index ecf0404..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/namespace/util.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-func prefixInterval(pfx string, key, end []byte) (pfxKey []byte, pfxEnd []byte) {
- pfxKey = make([]byte, len(pfx)+len(key))
- copy(pfxKey[copy(pfxKey, pfx):], key)
-
- if len(end) == 1 && end[0] == 0 {
- // the edge of the keyspace
- pfxEnd = make([]byte, len(pfx))
- copy(pfxEnd, pfx)
- ok := false
- for i := len(pfxEnd) - 1; i >= 0; i-- {
- if pfxEnd[i]++; pfxEnd[i] != 0 {
- ok = true
- break
- }
- }
- if !ok {
- // 0xff..ff => 0x00
- pfxEnd = []byte{0}
- }
- } else if len(end) >= 1 {
- pfxEnd = make([]byte, len(pfx)+len(end))
- copy(pfxEnd[copy(pfxEnd, pfx):], end)
- }
-
- return pfxKey, pfxEnd
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go b/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go
deleted file mode 100644
index 5a9596d..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/namespace/watch.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package namespace
-
-import (
- "context"
- "sync"
-
- "github.com/coreos/etcd/clientv3"
-)
-
-type watcherPrefix struct {
- clientv3.Watcher
- pfx string
-
- wg sync.WaitGroup
- stopc chan struct{}
- stopOnce sync.Once
-}
-
-// NewWatcher wraps a Watcher instance so that all Watch requests
-// are prefixed with a given string and all Watch responses have
-// the prefix removed.
-func NewWatcher(w clientv3.Watcher, prefix string) clientv3.Watcher {
- return &watcherPrefix{Watcher: w, pfx: prefix, stopc: make(chan struct{})}
-}
-
-func (w *watcherPrefix) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
- // since OpOption is opaque, determine range for prefixing through an OpGet
- op := clientv3.OpGet(key, opts...)
- end := op.RangeBytes()
- pfxBegin, pfxEnd := prefixInterval(w.pfx, []byte(key), end)
- if pfxEnd != nil {
- opts = append(opts, clientv3.WithRange(string(pfxEnd)))
- }
-
- wch := w.Watcher.Watch(ctx, string(pfxBegin), opts...)
-
- // translate watch events from prefixed to unprefixed
- pfxWch := make(chan clientv3.WatchResponse)
- w.wg.Add(1)
- go func() {
- defer func() {
- close(pfxWch)
- w.wg.Done()
- }()
- for wr := range wch {
- for i := range wr.Events {
- wr.Events[i].Kv.Key = wr.Events[i].Kv.Key[len(w.pfx):]
- if wr.Events[i].PrevKv != nil {
- wr.Events[i].PrevKv.Key = wr.Events[i].Kv.Key
- }
- }
- select {
- case pfxWch <- wr:
- case <-ctx.Done():
- return
- case <-w.stopc:
- return
- }
- }
- }()
- return pfxWch
-}
-
-func (w *watcherPrefix) Close() error {
- err := w.Watcher.Close()
- w.stopOnce.Do(func() { close(w.stopc) })
- w.wg.Wait()
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go b/vendor/github.com/coreos/etcd/clientv3/naming/doc.go
deleted file mode 100644
index 71608cc..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/naming/doc.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services.
-//
-// To use, first import the packages:
-//
-// import (
-// "github.com/coreos/etcd/clientv3"
-// etcdnaming "github.com/coreos/etcd/clientv3/naming"
-//
-// "google.golang.org/grpc"
-// "google.golang.org/grpc/naming"
-// )
-//
-// First, register new endpoint addresses for a service:
-//
-// func etcdAdd(c *clientv3.Client, service, addr string) error {
-// r := &etcdnaming.GRPCResolver{Client: c}
-// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr})
-// }
-//
-// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
-//
-// func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
-// r := &etcdnaming.GRPCResolver{Client: c}
-// b := grpc.RoundRobin(r)
-// return grpc.Dial(service, grpc.WithBalancer(b))
-// }
-//
-// Optionally, force delete an endpoint:
-//
-//	func etcdDelete(c *clientv3.Client, service, addr string) error {
-// r := &etcdnaming.GRPCResolver{Client: c}
-//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Delete, Addr: addr})
-// }
-//
-// Or register an expiring endpoint with a lease:
-//
-// func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
-// r := &etcdnaming.GRPCResolver{Client: c}
-// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
-// }
-//
-package naming
diff --git a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go b/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go
deleted file mode 100644
index 3c0e8e6..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/naming/grpc.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package naming
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- etcd "github.com/coreos/etcd/clientv3"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/naming"
- "google.golang.org/grpc/status"
-)
-
-var ErrWatcherClosed = fmt.Errorf("naming: watch closed")
-
-// GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.
-type GRPCResolver struct {
- // Client is an initialized etcd client.
- Client *etcd.Client
-}
-
-func (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {
- switch nm.Op {
- case naming.Add:
- var v []byte
- if v, err = json.Marshal(nm); err != nil {
- return status.Error(codes.InvalidArgument, err.Error())
- }
- _, err = gr.Client.KV.Put(ctx, target+"/"+nm.Addr, string(v), opts...)
- case naming.Delete:
- _, err = gr.Client.Delete(ctx, target+"/"+nm.Addr, opts...)
- default:
- return status.Error(codes.InvalidArgument, "naming: bad naming op")
- }
- return err
-}
-
-func (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {
- ctx, cancel := context.WithCancel(context.Background())
- w := &gRPCWatcher{c: gr.Client, target: target + "/", ctx: ctx, cancel: cancel}
- return w, nil
-}
-
-type gRPCWatcher struct {
- c *etcd.Client
- target string
- ctx context.Context
- cancel context.CancelFunc
- wch etcd.WatchChan
- err error
-}
-
-// Next gets the next set of updates from the etcd resolver.
-// Calls to Next should be serialized; concurrent calls are not safe since
-// there is no way to reconcile the update ordering.
-func (gw *gRPCWatcher) Next() ([]*naming.Update, error) {
- if gw.wch == nil {
- // first Next() returns all addresses
- return gw.firstNext()
- }
- if gw.err != nil {
- return nil, gw.err
- }
-
- // process new events on target/*
- wr, ok := <-gw.wch
- if !ok {
- gw.err = status.Error(codes.Unavailable, ErrWatcherClosed.Error())
- return nil, gw.err
- }
- if gw.err = wr.Err(); gw.err != nil {
- return nil, gw.err
- }
-
- updates := make([]*naming.Update, 0, len(wr.Events))
- for _, e := range wr.Events {
- var jupdate naming.Update
- var err error
- switch e.Type {
- case etcd.EventTypePut:
- err = json.Unmarshal(e.Kv.Value, &jupdate)
- jupdate.Op = naming.Add
- case etcd.EventTypeDelete:
- err = json.Unmarshal(e.PrevKv.Value, &jupdate)
- jupdate.Op = naming.Delete
- }
- if err == nil {
- updates = append(updates, &jupdate)
- }
- }
- return updates, nil
-}
-
-func (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {
- // Use serialized request so resolution still works if the target etcd
- // server is partitioned away from the quorum.
- resp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())
- if gw.err = err; err != nil {
- return nil, err
- }
-
- updates := make([]*naming.Update, 0, len(resp.Kvs))
- for _, kv := range resp.Kvs {
- var jupdate naming.Update
- if err := json.Unmarshal(kv.Value, &jupdate); err != nil {
- continue
- }
- updates = append(updates, &jupdate)
- }
-
- opts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}
- gw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)
- return updates, nil
-}
-
-func (gw *gRPCWatcher) Close() { gw.cancel() }
diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go
deleted file mode 100644
index c6ec5bf..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/op.go
+++ /dev/null
@@ -1,513 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-type opType int
-
-const (
- // A default Op has opType 0, which is invalid.
- tRange opType = iota + 1
- tPut
- tDeleteRange
- tTxn
-)
-
-var (
- noPrefixEnd = []byte{0}
-)
-
-// Op represents an Operation that kv can execute.
-type Op struct {
- t opType
- key []byte
- end []byte
-
- // for range
- limit int64
- sort *SortOption
- serializable bool
- keysOnly bool
- countOnly bool
- minModRev int64
- maxModRev int64
- minCreateRev int64
- maxCreateRev int64
-
- // for range, watch
- rev int64
-
- // for watch, put, delete
- prevKV bool
-
- // for put
- ignoreValue bool
- ignoreLease bool
-
- // progressNotify is for progress updates.
- progressNotify bool
- // createdNotify is for created event
- createdNotify bool
- // filters for watchers
- filterPut bool
- filterDelete bool
-
- // for put
- val []byte
- leaseID LeaseID
-
- // txn
- cmps []Cmp
- thenOps []Op
- elseOps []Op
-}
-
-// accessors / mutators
-
-func (op Op) IsTxn() bool { return op.t == tTxn }
-func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
-
-// KeyBytes returns the byte slice holding the Op's key.
-func (op Op) KeyBytes() []byte { return op.key }
-
-// WithKeyBytes sets the byte slice for the Op's key.
-func (op *Op) WithKeyBytes(key []byte) { op.key = key }
-
-// RangeBytes returns the byte slice holding with the Op's range end, if any.
-func (op Op) RangeBytes() []byte { return op.end }
-
-// Rev returns the requested revision, if any.
-func (op Op) Rev() int64 { return op.rev }
-
-// IsPut returns true iff the operation is a Put.
-func (op Op) IsPut() bool { return op.t == tPut }
-
-// IsGet returns true iff the operation is a Get.
-func (op Op) IsGet() bool { return op.t == tRange }
-
-// IsDelete returns true iff the operation is a Delete.
-func (op Op) IsDelete() bool { return op.t == tDeleteRange }
-
-// IsSerializable returns true if the serializable field is true.
-func (op Op) IsSerializable() bool { return op.serializable == true }
-
-// IsKeysOnly returns whether keysOnly is set.
-func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
-
-// IsCountOnly returns whether countOnly is set.
-func (op Op) IsCountOnly() bool { return op.countOnly == true }
-
-// MinModRev returns the operation's minimum modify revision.
-func (op Op) MinModRev() int64 { return op.minModRev }
-
-// MaxModRev returns the operation's maximum modify revision.
-func (op Op) MaxModRev() int64 { return op.maxModRev }
-
-// MinCreateRev returns the operation's minimum create revision.
-func (op Op) MinCreateRev() int64 { return op.minCreateRev }
-
-// MaxCreateRev returns the operation's maximum create revision.
-func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
-
-// WithRangeBytes sets the byte slice for the Op's range end.
-func (op *Op) WithRangeBytes(end []byte) { op.end = end }
-
-// ValueBytes returns the byte slice holding the Op's value, if any.
-func (op Op) ValueBytes() []byte { return op.val }
-
-// WithValueBytes sets the byte slice for the Op's value.
-func (op *Op) WithValueBytes(v []byte) { op.val = v }
-
-func (op Op) toRangeRequest() *pb.RangeRequest {
- if op.t != tRange {
- panic("op.t != tRange")
- }
- r := &pb.RangeRequest{
- Key: op.key,
- RangeEnd: op.end,
- Limit: op.limit,
- Revision: op.rev,
- Serializable: op.serializable,
- KeysOnly: op.keysOnly,
- CountOnly: op.countOnly,
- MinModRevision: op.minModRev,
- MaxModRevision: op.maxModRev,
- MinCreateRevision: op.minCreateRev,
- MaxCreateRevision: op.maxCreateRev,
- }
- if op.sort != nil {
- r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
- r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
- }
- return r
-}
-
-func (op Op) toTxnRequest() *pb.TxnRequest {
- thenOps := make([]*pb.RequestOp, len(op.thenOps))
- for i, tOp := range op.thenOps {
- thenOps[i] = tOp.toRequestOp()
- }
- elseOps := make([]*pb.RequestOp, len(op.elseOps))
- for i, eOp := range op.elseOps {
- elseOps[i] = eOp.toRequestOp()
- }
- cmps := make([]*pb.Compare, len(op.cmps))
- for i := range op.cmps {
- cmps[i] = (*pb.Compare)(&op.cmps[i])
- }
- return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
-}
-
-func (op Op) toRequestOp() *pb.RequestOp {
- switch op.t {
- case tRange:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
- case tPut:
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
- case tDeleteRange:
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
- case tTxn:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
- default:
- panic("Unknown Op")
- }
-}
-
-func (op Op) isWrite() bool {
- if op.t == tTxn {
- for _, tOp := range op.thenOps {
- if tOp.isWrite() {
- return true
- }
- }
- for _, tOp := range op.elseOps {
- if tOp.isWrite() {
- return true
- }
- }
- return false
- }
- return op.t != tRange
-}
-
-func OpGet(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
- ret.applyOpts(opts)
- return ret
-}
-
-func OpDelete(key string, opts ...OpOption) Op {
- ret := Op{t: tDeleteRange, key: []byte(key)}
- ret.applyOpts(opts)
- switch {
- case ret.leaseID != 0:
- panic("unexpected lease in delete")
- case ret.limit != 0:
- panic("unexpected limit in delete")
- case ret.rev != 0:
- panic("unexpected revision in delete")
- case ret.sort != nil:
- panic("unexpected sort in delete")
- case ret.serializable:
- panic("unexpected serializable in delete")
- case ret.countOnly:
- panic("unexpected countOnly in delete")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in delete")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in delete")
- case ret.filterDelete, ret.filterPut:
- panic("unexpected filter in delete")
- case ret.createdNotify:
- panic("unexpected createdNotify in delete")
- }
- return ret
-}
-
-func OpPut(key, val string, opts ...OpOption) Op {
- ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
- ret.applyOpts(opts)
- switch {
- case ret.end != nil:
- panic("unexpected range in put")
- case ret.limit != 0:
- panic("unexpected limit in put")
- case ret.rev != 0:
- panic("unexpected revision in put")
- case ret.sort != nil:
- panic("unexpected sort in put")
- case ret.serializable:
- panic("unexpected serializable in put")
- case ret.countOnly:
- panic("unexpected countOnly in put")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in put")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in put")
- case ret.filterDelete, ret.filterPut:
- panic("unexpected filter in put")
- case ret.createdNotify:
- panic("unexpected createdNotify in put")
- }
- return ret
-}
-
-func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
- return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
-}
-
-func opWatch(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
- ret.applyOpts(opts)
- switch {
- case ret.leaseID != 0:
- panic("unexpected lease in watch")
- case ret.limit != 0:
- panic("unexpected limit in watch")
- case ret.sort != nil:
- panic("unexpected sort in watch")
- case ret.serializable:
- panic("unexpected serializable in watch")
- case ret.countOnly:
- panic("unexpected countOnly in watch")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in watch")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in watch")
- }
- return ret
-}
-
-func (op *Op) applyOpts(opts []OpOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// OpOption configures Operations like Get, Put, Delete.
-type OpOption func(*Op)
-
-// WithLease attaches a lease ID to a key in 'Put' request.
-func WithLease(leaseID LeaseID) OpOption {
- return func(op *Op) { op.leaseID = leaseID }
-}
-
-// WithLimit limits the number of results to return from 'Get' request.
-// If WithLimit is given a 0 limit, it is treated as no limit.
-func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
-
-// WithRev specifies the store revision for 'Get' request.
-// Or the start revision of 'Watch' request.
-func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
-
-// WithSort specifies the ordering in 'Get' request. It requires
-// 'WithRange' and/or 'WithPrefix' to be specified too.
-// 'target' specifies the target to sort by: key, version, revisions, value.
-// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
-func WithSort(target SortTarget, order SortOrder) OpOption {
- return func(op *Op) {
- if target == SortByKey && order == SortAscend {
- // If order != SortNone, server fetches the entire key-space,
- // and then applies the sort and limit, if provided.
- // Since by default the server returns results sorted by keys
- // in lexicographically ascending order, the client should ignore
- // SortOrder if the target is SortByKey.
- order = SortNone
- }
- op.sort = &SortOption{target, order}
- }
-}
-
-// GetPrefixRangeEnd gets the range end of the prefix.
-// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
-func GetPrefixRangeEnd(prefix string) string {
- return string(getPrefix([]byte(prefix)))
-}
-
-func getPrefix(key []byte) []byte {
- end := make([]byte, len(key))
- copy(end, key)
- for i := len(end) - 1; i >= 0; i-- {
- if end[i] < 0xff {
- end[i] = end[i] + 1
- end = end[:i+1]
- return end
- }
- }
- // next prefix does not exist (e.g., 0xffff);
- // default to WithFromKey policy
- return noPrefixEnd
-}
-
-// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
-// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
-// can return 'foo1', 'foo2', and so on.
-func WithPrefix() OpOption {
- return func(op *Op) {
- if len(op.key) == 0 {
- op.key, op.end = []byte{0}, []byte{0}
- return
- }
- op.end = getPrefix(op.key)
- }
-}
-
-// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
-// For example, 'Get' requests with 'WithRange(end)' returns
-// the keys in the range [key, end).
-// endKey must be lexicographically greater than start key.
-func WithRange(endKey string) OpOption {
- return func(op *Op) { op.end = []byte(endKey) }
-}
-
-// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
-// to be equal or greater than the key in the argument.
-func WithFromKey() OpOption { return WithRange("\x00") }
-
-// WithSerializable makes the 'Get' request serializable. By default,
-// it's linearizable. Serializable requests are better for lower-latency
-// requirements.
-func WithSerializable() OpOption {
- return func(op *Op) { op.serializable = true }
-}
-
-// WithKeysOnly makes the 'Get' request return only the keys; the corresponding
-// values are omitted.
-func WithKeysOnly() OpOption {
- return func(op *Op) { op.keysOnly = true }
-}
-
-// WithCountOnly makes the 'Get' request return only the count of keys.
-func WithCountOnly() OpOption {
- return func(op *Op) { op.countOnly = true }
-}
-
-// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
-func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
-
-// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
-func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
-
-// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
-func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
-
-// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
-func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
-
-// WithFirstCreate gets the key with the oldest creation revision in the request range.
-func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
-
-// WithLastCreate gets the key with the latest creation revision in the request range.
-func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
-
-// WithFirstKey gets the lexically first key in the request range.
-func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
-
-// WithLastKey gets the lexically last key in the request range.
-func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
-
-// WithFirstRev gets the key with the oldest modification revision in the request range.
-func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
-
-// WithLastRev gets the key with the latest modification revision in the request range.
-func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
-
-// withTop gets the first key over the get's prefix given a sort order
-func withTop(target SortTarget, order SortOrder) []OpOption {
- return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
-}
-
-// WithProgressNotify makes watch server send periodic progress updates
-// every 10 minutes when there is no incoming events.
-// Progress updates have zero events in WatchResponse.
-func WithProgressNotify() OpOption {
- return func(op *Op) {
- op.progressNotify = true
- }
-}
-
-// WithCreatedNotify makes watch server sends the created event.
-func WithCreatedNotify() OpOption {
- return func(op *Op) {
- op.createdNotify = true
- }
-}
-
-// WithFilterPut discards PUT events from the watcher.
-func WithFilterPut() OpOption {
- return func(op *Op) { op.filterPut = true }
-}
-
-// WithFilterDelete discards DELETE events from the watcher.
-func WithFilterDelete() OpOption {
- return func(op *Op) { op.filterDelete = true }
-}
-
-// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
-// nothing will be returned.
-func WithPrevKV() OpOption {
- return func(op *Op) {
- op.prevKV = true
- }
-}
-
-// WithIgnoreValue updates the key using its current value.
-// This option can not be combined with non-empty values.
-// Returns an error if the key does not exist.
-func WithIgnoreValue() OpOption {
- return func(op *Op) {
- op.ignoreValue = true
- }
-}
-
-// WithIgnoreLease updates the key using its current lease.
-// This option can not be combined with WithLease.
-// Returns an error if the key does not exist.
-func WithIgnoreLease() OpOption {
- return func(op *Op) {
- op.ignoreLease = true
- }
-}
-
-// LeaseOp represents an Operation that lease can execute.
-type LeaseOp struct {
- id LeaseID
-
- // for TimeToLive
- attachedKeys bool
-}
-
-// LeaseOption configures lease operations.
-type LeaseOption func(*LeaseOp)
-
-func (op *LeaseOp) applyOpts(opts []LeaseOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
-func WithAttachedKeys() LeaseOption {
- return func(op *LeaseOp) { op.attachedKeys = true }
-}
-
-func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
- ret := &LeaseOp{id: id}
- ret.applyOpts(opts)
- return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/github.com/coreos/etcd/clientv3/options.go
deleted file mode 100644
index fa25811..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/options.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math"
-
- "google.golang.org/grpc"
-)
-
-var (
- // Disable gRPC internal retrial logic
- // TODO: enable when gRPC retry is stable (FailFast=false)
- // Reference:
- // - https://github.com/grpc/grpc-go/issues/1532
- // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
- defaultFailFast = grpc.FailFast(true)
-
- // client-side request send limit, gRPC default is math.MaxInt32
- // Make sure that "client-side send limit < server-side default send/recv limit"
- // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
- defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
-
- // client-side response receive limit, gRPC default is 4MB
- // Make sure that "client-side receive limit >= server-side default send/recv limit"
- // because range response can easily exceed request send limits
- // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway
- defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
-)
-
-// defaultCallOpts defines a list of default "gRPC.CallOption".
-// Some options are exposed to "clientv3.Config".
-// Defaults will be overridden by the settings in "clientv3.Config".
-var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
diff --git a/vendor/github.com/coreos/etcd/clientv3/ordering/doc.go b/vendor/github.com/coreos/etcd/clientv3/ordering/doc.go
deleted file mode 100644
index 856f330..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/ordering/doc.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ordering is a clientv3 wrapper that caches response header revisions
-// to detect ordering violations from stale responses. Users may define a
-// policy on how to handle the ordering violation, but typically the client
-// should connect to another endpoint and reissue the request.
-//
-// The most common situation where an ordering violation happens is a client
-// reconnects to a partitioned member and issues a serializable read. Since the
-// partitioned member is likely behind the last member, it may return a Get
-// response based on a store revision older than the store revision used to
-// service a prior Get on the former endpoint.
-//
-// First, create a client:
-//
-// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
-// if err != nil {
-// // handle error!
-// }
-//
-// Next, override the client interface with the ordering wrapper:
-//
-// vf := func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
-// return fmt.Errorf("ordering: issued %+v, got %+v, expected rev=%v", op, resp, prevRev)
-// }
-// cli.KV = ordering.NewKV(cli.KV, vf)
-//
-// Now calls using 'cli' will reject order violations with an error.
-//
-package ordering
diff --git a/vendor/github.com/coreos/etcd/clientv3/ordering/kv.go b/vendor/github.com/coreos/etcd/clientv3/ordering/kv.go
deleted file mode 100644
index dc9926e..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/ordering/kv.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ordering
-
-import (
- "context"
- "sync"
-
- "github.com/coreos/etcd/clientv3"
-)
-
-// kvOrdering ensures that serialized requests do not return
-// Get responses with revisions less than the previously
-// returned revision.
-type kvOrdering struct {
- clientv3.KV
- orderViolationFunc OrderViolationFunc
- prevRev int64
- revMu sync.RWMutex
-}
-
-func NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering {
- return &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}}
-}
-
-func (kv *kvOrdering) getPrevRev() int64 {
- kv.revMu.RLock()
- defer kv.revMu.RUnlock()
- return kv.prevRev
-}
-
-func (kv *kvOrdering) setPrevRev(currRev int64) {
- kv.revMu.Lock()
- defer kv.revMu.Unlock()
- if currRev > kv.prevRev {
- kv.prevRev = currRev
- }
-}
-
-func (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
- // prevRev is stored in a local variable in order to record the prevRev
- // at the beginning of the Get operation, because concurrent
- // access to kvOrdering could change the prevRev field in the
- // middle of the Get operation.
- prevRev := kv.getPrevRev()
- op := clientv3.OpGet(key, opts...)
- for {
- r, err := kv.KV.Do(ctx, op)
- if err != nil {
- return nil, err
- }
- resp := r.Get()
- if resp.Header.Revision == prevRev {
- return resp, nil
- } else if resp.Header.Revision > prevRev {
- kv.setPrevRev(resp.Header.Revision)
- return resp, nil
- }
- err = kv.orderViolationFunc(op, r, prevRev)
- if err != nil {
- return nil, err
- }
- }
-}
-
-func (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn {
- return &txnOrdering{
- kv.KV.Txn(ctx),
- kv,
- ctx,
- sync.Mutex{},
- []clientv3.Cmp{},
- []clientv3.Op{},
- []clientv3.Op{},
- }
-}
-
-// txnOrdering ensures that serialized requests do not return
-// txn responses with revisions less than the previously
-// returned revision.
-type txnOrdering struct {
- clientv3.Txn
- *kvOrdering
- ctx context.Context
- mu sync.Mutex
- cmps []clientv3.Cmp
- thenOps []clientv3.Op
- elseOps []clientv3.Op
-}
-
-func (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
- txn.cmps = cs
- txn.Txn.If(cs...)
- return txn
-}
-
-func (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
- txn.thenOps = ops
- txn.Txn.Then(ops...)
- return txn
-}
-
-func (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
- txn.elseOps = ops
- txn.Txn.Else(ops...)
- return txn
-}
-
-func (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) {
- // prevRev is stored in a local variable in order to record the prevRev
- // at the beginning of the Commit operation, because concurrent
- // access to txnOrdering could change the prevRev field in the
- // middle of the Commit operation.
- prevRev := txn.getPrevRev()
- opTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps)
- for {
- opResp, err := txn.KV.Do(txn.ctx, opTxn)
- if err != nil {
- return nil, err
- }
- txnResp := opResp.Txn()
- if txnResp.Header.Revision >= prevRev {
- txn.setPrevRev(txnResp.Header.Revision)
- return txnResp, nil
- }
- err = txn.orderViolationFunc(opTxn, opResp, prevRev)
- if err != nil {
- return nil, err
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/ordering/util.go b/vendor/github.com/coreos/etcd/clientv3/ordering/util.go
deleted file mode 100644
index 190a591..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/ordering/util.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ordering
-
-import (
- "errors"
- "sync"
- "time"
-
- "github.com/coreos/etcd/clientv3"
-)
-
-type OrderViolationFunc func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error
-
-var ErrNoGreaterRev = errors.New("etcdclient: no cluster members have a revision higher than the previously received revision")
-
-func NewOrderViolationSwitchEndpointClosure(c clientv3.Client) OrderViolationFunc {
- var mu sync.Mutex
- violationCount := 0
- return func(op clientv3.Op, resp clientv3.OpResponse, prevRev int64) error {
- if violationCount > len(c.Endpoints()) {
- return ErrNoGreaterRev
- }
- mu.Lock()
- defer mu.Unlock()
- eps := c.Endpoints()
- // force client to connect to given endpoint by limiting to a single endpoint
- c.SetEndpoints(eps[violationCount%len(eps)])
- // give enough time for operation
- time.Sleep(1 * time.Second)
-		// set available endpoints back to all endpoints to ensure
- // the client has access to all the endpoints.
- c.SetEndpoints(eps...)
- violationCount++
- return nil
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go
deleted file mode 100644
index c6ef585..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import "context"
-
-// TODO: remove this when "FailFast=false" is fixed.
-// See https://github.com/grpc/grpc-go/issues/1532.
-func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error {
- select {
- case <-ready:
- return nil
- case <-rpcCtx.Done():
- return rpcCtx.Err()
- case <-clientCtx.Done():
- return clientCtx.Err()
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go
deleted file mode 100644
index 7f89ba6..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/retry.go
+++ /dev/null
@@ -1,496 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-type retryPolicy uint8
-
-const (
- repeatable retryPolicy = iota
- nonRepeatable
-)
-
-type rpcFunc func(ctx context.Context) error
-type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
-type retryStopErrFunc func(error) bool
-
-// immutable requests (e.g. Get) should be retried unless it's
-// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
-//
-// "isRepeatableStopError" returns "true" when an immutable request
-// is interrupted by server-side or gRPC-side error and its status
-// code is not transient (!= codes.Unavailable).
-//
-// Returning "true" means retry should stop, since client cannot
-// handle itself even with retries.
-func isRepeatableStopError(err error) bool {
- eErr := rpctypes.Error(err)
- // always stop retry on etcd errors
- if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
- return true
- }
- // only retry if unavailable
- ev, _ := status.FromError(err)
- return ev.Code() != codes.Unavailable
-}
-
-// mutable requests (e.g. Put, Delete, Txn) should only be retried
-// when the status code is codes.Unavailable when initial connection
-// has not been established (no pinned endpoint).
-//
-// "isNonRepeatableStopError" returns "true" when a mutable request
-// is interrupted by non-transient error that client cannot handle itself,
-// or transient error while the connection has already been established
-// (pinned endpoint exists).
-//
-// Returning "true" means retry should stop, otherwise it violates
-// write-at-most-once semantics.
-func isNonRepeatableStopError(err error) bool {
- ev, _ := status.FromError(err)
- if ev.Code() != codes.Unavailable {
- return true
- }
- desc := rpctypes.ErrorDesc(err)
- return desc != "there is no address available" && desc != "there is no connection available"
-}
-
-func (c *Client) newRetryWrapper() retryRPCFunc {
- return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
- var isStop retryStopErrFunc
- switch rp {
- case repeatable:
- isStop = isRepeatableStopError
- case nonRepeatable:
- isStop = isNonRepeatableStopError
- }
- for {
- if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
- return err
- }
- pinned := c.balancer.pinned()
- err := f(rpcCtx)
- if err == nil {
- return nil
- }
- logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
-
- if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
- // mark this before endpoint switch is triggered
- c.balancer.hostPortError(pinned, err)
- c.balancer.next()
- logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
- }
-
- if isStop(err) {
- return err
- }
- }
- }
-}
-
-func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
- return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
- for {
- pinned := c.balancer.pinned()
- err := retryf(rpcCtx, f, rp)
- if err == nil {
- return nil
- }
- logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
- // always stop retry on etcd errors other than invalid auth token
- if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
- gterr := c.getToken(rpcCtx)
- if gterr != nil {
- logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
- return err // return the original error for simplicity
- }
- continue
- }
- return err
- }
- }
-}
-
-type retryKVClient struct {
- kc pb.KVClient
- retryf retryRPCFunc
-}
-
-// RetryKVClient implements a KVClient.
-func RetryKVClient(c *Client) pb.KVClient {
- return &retryKVClient{
- kc: pb.NewKVClient(c.conn),
- retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
- }
-}
-func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Range(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Put(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
- // TODO: "repeatable" for read-only txn
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Txn(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Compact(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-type retryLeaseClient struct {
- lc pb.LeaseClient
- retryf retryRPCFunc
-}
-
-// RetryLeaseClient implements a LeaseClient.
-func RetryLeaseClient(c *Client) pb.LeaseClient {
- return &retryLeaseClient{
- lc: pb.NewLeaseClient(c.conn),
- retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
- }
-}
-
-func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-
-}
-
-func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
- return err
- }, repeatable)
- return stream, err
-}
-
-type retryClusterClient struct {
- cc pb.ClusterClient
- retryf retryRPCFunc
-}
-
-// RetryClusterClient implements a ClusterClient.
-func RetryClusterClient(c *Client) pb.ClusterClient {
- return &retryClusterClient{
- cc: pb.NewClusterClient(c.conn),
- retryf: c.newRetryWrapper(),
- }
-}
-
-func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberList(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-type retryMaintenanceClient struct {
- mc pb.MaintenanceClient
- retryf retryRPCFunc
-}
-
-// RetryMaintenanceClient implements a Maintenance.
-func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
- return &retryMaintenanceClient{
- mc: pb.NewMaintenanceClient(conn),
- retryf: c.newRetryWrapper(),
- }
-}
-
-func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Alarm(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Status(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Hash(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.HashKV(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- stream, err = rmc.mc.Snapshot(rctx, in, opts...)
- return err
- }, repeatable)
- return stream, err
-}
-
-func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Defragment(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-type retryAuthClient struct {
- ac pb.AuthClient
- retryf retryRPCFunc
-}
-
-// RetryAuthClient implements a AuthClient.
-func RetryAuthClient(c *Client) pb.AuthClient {
- return &retryAuthClient{
- ac: pb.NewAuthClient(c.conn),
- retryf: c.newRetryWrapper(),
- }
-}
-
-func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserList(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserGet(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleGet(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleList(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.AuthEnable(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.AuthDisable(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserAdd(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserDelete(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleAdd(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleDelete(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
-
-func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.Authenticate(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go
deleted file mode 100644
index 2bb9d9a..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/sort.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-type SortTarget int
-type SortOrder int
-
-const (
- SortNone SortOrder = iota
- SortAscend
- SortDescend
-)
-
-const (
- SortByKey SortTarget = iota
- SortByVersion
- SortByCreateRevision
- SortByModRevision
- SortByValue
-)
-
-type SortOption struct {
- Target SortTarget
- Order SortOrder
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go
deleted file mode 100644
index c3c2d24..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/txn.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-// Txn is the interface that wraps mini-transactions.
-//
-// Txn(context.TODO()).If(
-// Compare(Value(k1), ">", v1),
-// Compare(Version(k1), "=", 2)
-// ).Then(
-// OpPut(k2,v2), OpPut(k3,v3)
-// ).Else(
-// OpPut(k4,v4), OpPut(k5,v5)
-// ).Commit()
-//
-type Txn interface {
-	// If takes a list of comparisons. If all comparisons passed in succeed,
-	// the operations passed into Then() will be executed; otherwise the
-	// operations passed into Else() will be executed.
- If(cs ...Cmp) Txn
-
- // Then takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() succeed.
- Then(ops ...Op) Txn
-
- // Else takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() fail.
- Else(ops ...Op) Txn
-
- // Commit tries to commit the transaction.
- Commit() (*TxnResponse, error)
-}
-
-type txn struct {
- kv *kv
- ctx context.Context
-
- mu sync.Mutex
- cif bool
- cthen bool
- celse bool
-
- isWrite bool
-
- cmps []*pb.Compare
-
- sus []*pb.RequestOp
- fas []*pb.RequestOp
-
- callOpts []grpc.CallOption
-}
-
-func (txn *txn) If(cs ...Cmp) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cif {
- panic("cannot call If twice!")
- }
-
- if txn.cthen {
- panic("cannot call If after Then!")
- }
-
- if txn.celse {
- panic("cannot call If after Else!")
- }
-
- txn.cif = true
-
- for i := range cs {
- txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
- }
-
- return txn
-}
-
-func (txn *txn) Then(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cthen {
- panic("cannot call Then twice!")
- }
- if txn.celse {
- panic("cannot call Then after Else!")
- }
-
- txn.cthen = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.sus = append(txn.sus, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Else(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.celse {
- panic("cannot call Else twice!")
- }
-
- txn.celse = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.fas = append(txn.fas, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Commit() (*TxnResponse, error) {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-
- var resp *pb.TxnResponse
- var err error
- resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
- if err != nil {
- return nil, toErr(txn.ctx, err)
- }
- return (*TxnResponse)(resp), nil
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go
deleted file mode 100644
index d763385..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/watch.go
+++ /dev/null
@@ -1,828 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-const (
- EventTypeDelete = mvccpb.DELETE
- EventTypePut = mvccpb.PUT
-
- closeSendErrTimeout = 250 * time.Millisecond
-)
-
-type Event mvccpb.Event
-
-type WatchChan <-chan WatchResponse
-
-type Watcher interface {
- // Watch watches on a key or prefix. The watched events will be returned
- // through the returned channel. If revisions waiting to be sent over the
- // watch are compacted, then the watch will be canceled by the server, the
- // client will post a compacted error watch response, and the channel will close.
- Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
-
- // Close closes the watcher and cancels all watch requests.
- Close() error
-}
-
-type WatchResponse struct {
- Header pb.ResponseHeader
- Events []*Event
-
- // CompactRevision is the minimum revision the watcher may receive.
- CompactRevision int64
-
- // Canceled is used to indicate watch failure.
- // If the watch failed and the stream was about to close, before the channel is closed,
- // the channel sends a final response that has Canceled set to true with a non-nil Err().
- Canceled bool
-
- // Created is used to indicate the creation of the watcher.
- Created bool
-
- closeErr error
-
- // cancelReason is a reason of canceling watch
- cancelReason string
-}
-
-// IsCreate returns true if the event tells that the key is newly created.
-func (e *Event) IsCreate() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
-}
-
-// IsModify returns true if the event tells that a new value is put on existing key.
-func (e *Event) IsModify() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
-}
-
-// Err is the error value if this WatchResponse holds an error.
-func (wr *WatchResponse) Err() error {
- switch {
- case wr.closeErr != nil:
- return v3rpc.Error(wr.closeErr)
- case wr.CompactRevision != 0:
- return v3rpc.ErrCompacted
- case wr.Canceled:
- if len(wr.cancelReason) != 0 {
- return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
- }
- return v3rpc.ErrFutureRev
- }
- return nil
-}
-
-// IsProgressNotify returns true if the WatchResponse is progress notification.
-func (wr *WatchResponse) IsProgressNotify() bool {
- return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
-}
-
-// watcher implements the Watcher interface
-type watcher struct {
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // mu protects the grpc streams map
- mu sync.RWMutex
-
- // streams holds all the active grpc streams keyed by ctx value.
- streams map[string]*watchGrpcStream
-}
-
-// watchGrpcStream tracks all watch resources attached to a single grpc stream.
-type watchGrpcStream struct {
- owner *watcher
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // ctx controls internal remote.Watch requests
- ctx context.Context
- // ctxKey is the key used when looking up this stream's context
- ctxKey string
- cancel context.CancelFunc
-
- // substreams holds all active watchers on this grpc stream
- substreams map[int64]*watcherStream
- // resuming holds all resuming watchers on this grpc stream
- resuming []*watcherStream
-
- // reqc sends a watch request from Watch() to the main goroutine
- reqc chan *watchRequest
- // respc receives data from the watch client
- respc chan *pb.WatchResponse
- // donec closes to broadcast shutdown
- donec chan struct{}
- // errc transmits errors from grpc Recv to the watch stream reconnect logic
- errc chan error
- // closingc gets the watcherStream of closing watchers
- closingc chan *watcherStream
- // wg is Done when all substream goroutines have exited
- wg sync.WaitGroup
-
- // resumec closes to signal that all substreams should begin resuming
- resumec chan struct{}
- // closeErr is the error that closed the watch stream
- closeErr error
-}
-
-// watchRequest is issued by the subscriber to start a new watcher
-type watchRequest struct {
- ctx context.Context
- key string
- end string
- rev int64
- // send created notification event if this field is true
- createdNotify bool
- // progressNotify is for progress updates
- progressNotify bool
- // filters is the list of events to filter out
- filters []pb.WatchCreateRequest_FilterType
- // get the previous key-value pair before the event happens
- prevKV bool
- // retc receives a chan WatchResponse once the watcher is established
- retc chan chan WatchResponse
-}
-
-// watcherStream represents a registered watcher
-type watcherStream struct {
- // initReq is the request that initiated this request
- initReq watchRequest
-
- // outc publishes watch responses to subscriber
- outc chan WatchResponse
- // recvc buffers watch responses before publishing
- recvc chan *WatchResponse
- // donec closes when the watcherStream goroutine stops.
- donec chan struct{}
- // closing is set to true when stream should be scheduled to shutdown.
- closing bool
- // id is the registered watch id on the grpc stream
- id int64
-
- // buf holds all events received from etcd but not yet consumed by the client
- buf []*WatchResponse
-}
-
-func NewWatcher(c *Client) Watcher {
- return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
-}
-
-func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
- w := &watcher{
- remote: wc,
- streams: make(map[string]*watchGrpcStream),
- }
- if c != nil {
- w.callOpts = c.callOpts
- }
- return w
-}
-
-// never closes
-var valCtxCh = make(chan struct{})
-var zeroTime = time.Unix(0, 0)
-
-// ctx with only the values; never Done
-type valCtx struct{ context.Context }
-
-func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
-func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
-func (vc *valCtx) Err() error { return nil }
-
-func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
- ctx, cancel := context.WithCancel(&valCtx{inctx})
- wgs := &watchGrpcStream{
- owner: w,
- remote: w.remote,
- callOpts: w.callOpts,
- ctx: ctx,
- ctxKey: streamKeyFromCtx(inctx),
- cancel: cancel,
- substreams: make(map[int64]*watcherStream),
- respc: make(chan *pb.WatchResponse),
- reqc: make(chan *watchRequest),
- donec: make(chan struct{}),
- errc: make(chan error, 1),
- closingc: make(chan *watcherStream),
- resumec: make(chan struct{}),
- }
- go wgs.run()
- return wgs
-}
-
-// Watch posts a watch request to run() and waits for a new watcher channel
-func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
- ow := opWatch(key, opts...)
-
- var filters []pb.WatchCreateRequest_FilterType
- if ow.filterPut {
- filters = append(filters, pb.WatchCreateRequest_NOPUT)
- }
- if ow.filterDelete {
- filters = append(filters, pb.WatchCreateRequest_NODELETE)
- }
-
- wr := &watchRequest{
- ctx: ctx,
- createdNotify: ow.createdNotify,
- key: string(ow.key),
- end: string(ow.end),
- rev: ow.rev,
- progressNotify: ow.progressNotify,
- filters: filters,
- prevKV: ow.prevKV,
- retc: make(chan chan WatchResponse, 1),
- }
-
- ok := false
- ctxKey := streamKeyFromCtx(ctx)
-
- // find or allocate appropriate grpc watch stream
- w.mu.Lock()
- if w.streams == nil {
- // closed
- w.mu.Unlock()
- ch := make(chan WatchResponse)
- close(ch)
- return ch
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- // couldn't create channel; return closed channel
- closeCh := make(chan WatchResponse, 1)
-
- // submit request
- select {
- case reqc <- wr:
- ok = true
- case <-wr.ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- return w.Watch(ctx, key, opts...)
- }
-
- // receive channel
- if ok {
- select {
- case ret := <-wr.retc:
- return ret
- case <-ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- return w.Watch(ctx, key, opts...)
- }
- }
-
- close(closeCh)
- return closeCh
-}
-
-func (w *watcher) Close() (err error) {
- w.mu.Lock()
- streams := w.streams
- w.streams = nil
- w.mu.Unlock()
- for _, wgs := range streams {
- if werr := wgs.close(); werr != nil {
- err = werr
- }
- }
- return err
-}
-
-func (w *watchGrpcStream) close() (err error) {
- w.cancel()
- <-w.donec
- select {
- case err = <-w.errc:
- default:
- }
- return toErr(w.ctx, err)
-}
-
-func (w *watcher) closeStream(wgs *watchGrpcStream) {
- w.mu.Lock()
- close(wgs.donec)
- wgs.cancel()
- if w.streams != nil {
- delete(w.streams, wgs.ctxKey)
- }
- w.mu.Unlock()
-}
-
-func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
- if resp.WatchId == -1 {
- // failed; no channel
- close(ws.recvc)
- return
- }
- ws.id = resp.WatchId
- w.substreams[ws.id] = ws
-}
-
-func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
- select {
- case ws.outc <- *resp:
- case <-ws.initReq.ctx.Done():
- case <-time.After(closeSendErrTimeout):
- }
- close(ws.outc)
-}
-
-func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
- // send channel response in case stream was never established
- select {
- case ws.initReq.retc <- ws.outc:
- default:
- }
- // close subscriber's channel
- if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
- go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
- } else if ws.outc != nil {
- close(ws.outc)
- }
- if ws.id != -1 {
- delete(w.substreams, ws.id)
- return
- }
- for i := range w.resuming {
- if w.resuming[i] == ws {
- w.resuming[i] = nil
- return
- }
- }
-}
-
-// run is the root of the goroutines for managing a watcher client
-func (w *watchGrpcStream) run() {
- var wc pb.Watch_WatchClient
- var closeErr error
-
- // substreams marked to close but goroutine still running; needed for
- // avoiding double-closing recvc on grpc stream teardown
- closing := make(map[*watcherStream]struct{})
-
- defer func() {
- w.closeErr = closeErr
- // shutdown substreams and resuming substreams
- for _, ws := range w.substreams {
- if _, ok := closing[ws]; !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- for _, ws := range w.resuming {
- if _, ok := closing[ws]; ws != nil && !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- w.joinSubstreams()
- for range closing {
- w.closeSubstream(<-w.closingc)
- }
- w.wg.Wait()
- w.owner.closeStream(w)
- }()
-
- // start a stream with the etcd grpc server
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
-
- cancelSet := make(map[int64]struct{})
-
- for {
- select {
- // Watch() requested
- case wreq := <-w.reqc:
- outc := make(chan WatchResponse, 1)
- ws := &watcherStream{
- initReq: *wreq,
- id: -1,
- outc: outc,
- // unbuffered so resumes won't cause repeat events
- recvc: make(chan *WatchResponse),
- }
-
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
-
- // queue up for watcher creation/resume
- w.resuming = append(w.resuming, ws)
- if len(w.resuming) == 1 {
- // head of resume queue, can register a new watcher
- wc.Send(ws.initReq.toPB())
- }
- // New events from the watch client
- case pbresp := <-w.respc:
- switch {
- case pbresp.Created:
- // response to head of queue creation
- if ws := w.resuming[0]; ws != nil {
- w.addSubstream(pbresp, ws)
- w.dispatchEvent(pbresp)
- w.resuming[0] = nil
- }
- if ws := w.nextResume(); ws != nil {
- wc.Send(ws.initReq.toPB())
- }
- case pbresp.Canceled && pbresp.CompactRevision == 0:
- delete(cancelSet, pbresp.WatchId)
- if ws, ok := w.substreams[pbresp.WatchId]; ok {
- // signal to stream goroutine to update closingc
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- default:
- // dispatch to appropriate watch stream
- if ok := w.dispatchEvent(pbresp); ok {
- break
- }
- // watch response on unexpected watch id; cancel id
- if _, ok := cancelSet[pbresp.WatchId]; ok {
- break
- }
- cancelSet[pbresp.WatchId] = struct{}{}
- cr := &pb.WatchRequest_CancelRequest{
- CancelRequest: &pb.WatchCancelRequest{
- WatchId: pbresp.WatchId,
- },
- }
- req := &pb.WatchRequest{RequestUnion: cr}
- wc.Send(req)
- }
- // watch client failed on Recv; spawn another if possible
- case err := <-w.errc:
- if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
- closeErr = err
- return
- }
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
- if ws := w.nextResume(); ws != nil {
- wc.Send(ws.initReq.toPB())
- }
- cancelSet = make(map[int64]struct{})
- case <-w.ctx.Done():
- return
- case ws := <-w.closingc:
- w.closeSubstream(ws)
- delete(closing, ws)
- if len(w.substreams)+len(w.resuming) == 0 {
- // no more watchers on this stream, shutdown
- return
- }
- }
- }
-}
-
-// nextResume chooses the next resuming to register with the grpc stream. Abandoned
-// streams are marked as nil in the queue since the head must wait for its inflight registration.
-func (w *watchGrpcStream) nextResume() *watcherStream {
- for len(w.resuming) != 0 {
- if w.resuming[0] != nil {
- return w.resuming[0]
- }
- w.resuming = w.resuming[1:len(w.resuming)]
- }
- return nil
-}
-
-// dispatchEvent sends a WatchResponse to the appropriate watcher stream
-func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
- events := make([]*Event, len(pbresp.Events))
- for i, ev := range pbresp.Events {
- events[i] = (*Event)(ev)
- }
- wr := &WatchResponse{
- Header: *pbresp.Header,
- Events: events,
- CompactRevision: pbresp.CompactRevision,
- Created: pbresp.Created,
- Canceled: pbresp.Canceled,
- cancelReason: pbresp.CancelReason,
- }
- ws, ok := w.substreams[pbresp.WatchId]
- if !ok {
- return false
- }
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- return false
- }
- return true
-}
-
-// serveWatchClient forwards messages from the grpc stream to run()
-func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
- for {
- resp, err := wc.Recv()
- if err != nil {
- select {
- case w.errc <- err:
- case <-w.donec:
- }
- return
- }
- select {
- case w.respc <- resp:
- case <-w.donec:
- return
- }
- }
-}
-
-// serveSubstream forwards watch responses from run() to the subscriber
-func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
- if ws.closing {
- panic("created substream goroutine but substream is closing")
- }
-
- // nextRev is the minimum expected next revision
- nextRev := ws.initReq.rev
- resuming := false
- defer func() {
- if !resuming {
- ws.closing = true
- }
- close(ws.donec)
- if !resuming {
- w.closingc <- ws
- }
- w.wg.Done()
- }()
-
- emptyWr := &WatchResponse{}
- for {
- curWr := emptyWr
- outc := ws.outc
-
- if len(ws.buf) > 0 {
- curWr = ws.buf[0]
- } else {
- outc = nil
- }
- select {
- case outc <- *curWr:
- if ws.buf[0].Err() != nil {
- return
- }
- ws.buf[0] = nil
- ws.buf = ws.buf[1:]
- case wr, ok := <-ws.recvc:
- if !ok {
- // shutdown from closeSubstream
- return
- }
-
- if wr.Created {
- if ws.initReq.retc != nil {
- ws.initReq.retc <- ws.outc
- // to prevent next write from taking the slot in buffered channel
- // and posting duplicate create events
- ws.initReq.retc = nil
-
- // send first creation event only if requested
- if ws.initReq.createdNotify {
- ws.outc <- *wr
- }
- // once the watch channel is returned, a current revision
- // watch must resume at the store revision. This is necessary
- // for the following case to work as expected:
- // wch := m1.Watch("a")
- // m2.Put("a", "b")
- // <-wch
- // If the revision is only bound on the first observed event,
- // if wch is disconnected before the Put is issued, then reconnects
- // after it is committed, it'll miss the Put.
- if ws.initReq.rev == 0 {
- nextRev = wr.Header.Revision
- }
- }
- } else {
- // current progress of watch; <= store revision
- nextRev = wr.Header.Revision
- }
-
- if len(wr.Events) > 0 {
- nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
- }
- ws.initReq.rev = nextRev
-
- // created event is already sent above,
- // watcher should not post duplicate events
- if wr.Created {
- continue
- }
-
- // TODO pause channel if buffer gets too large
- ws.buf = append(ws.buf, wr)
- case <-w.ctx.Done():
- return
- case <-ws.initReq.ctx.Done():
- return
- case <-resumec:
- resuming = true
- return
- }
- }
- // lazily send cancel message if events on missing id
-}
-
-func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
- // mark all substreams as resuming
- close(w.resumec)
- w.resumec = make(chan struct{})
- w.joinSubstreams()
- for _, ws := range w.substreams {
- ws.id = -1
- w.resuming = append(w.resuming, ws)
- }
- // strip out nils, if any
- var resuming []*watcherStream
- for _, ws := range w.resuming {
- if ws != nil {
- resuming = append(resuming, ws)
- }
- }
- w.resuming = resuming
- w.substreams = make(map[int64]*watcherStream)
-
- // connect to grpc stream while accepting watcher cancelation
- stopc := make(chan struct{})
- donec := w.waitCancelSubstreams(stopc)
- wc, err := w.openWatchClient()
- close(stopc)
- <-donec
-
- // serve all non-closing streams, even if there's a client error
- // so that the teardown path can shutdown the streams as expected.
- for _, ws := range w.resuming {
- if ws.closing {
- continue
- }
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
- }
-
- if err != nil {
- return nil, v3rpc.Error(err)
- }
-
- // receive data from new grpc stream
- go w.serveWatchClient(wc)
- return wc, nil
-}
-
-func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
- var wg sync.WaitGroup
- wg.Add(len(w.resuming))
- donec := make(chan struct{})
- for i := range w.resuming {
- go func(ws *watcherStream) {
- defer wg.Done()
- if ws.closing {
- if ws.initReq.ctx.Err() != nil && ws.outc != nil {
- close(ws.outc)
- ws.outc = nil
- }
- return
- }
- select {
- case <-ws.initReq.ctx.Done():
- // closed ws will be removed from resuming
- ws.closing = true
- close(ws.outc)
- ws.outc = nil
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- w.closingc <- ws
- }()
- case <-stopc:
- }
- }(w.resuming[i])
- }
- go func() {
- defer close(donec)
- wg.Wait()
- }()
- return donec
-}
-
-// joinSubstreams waits for all substream goroutines to complete.
-func (w *watchGrpcStream) joinSubstreams() {
- for _, ws := range w.substreams {
- <-ws.donec
- }
- for _, ws := range w.resuming {
- if ws != nil {
- <-ws.donec
- }
- }
-}
-
-var maxBackoff = 100 * time.Millisecond
-
-// openWatchClient retries opening a watch client until success or halt.
-// manually retry in case "ws==nil && err==nil"
-// TODO: remove FailFast=false
-func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
- backoff := time.Millisecond
- for {
- select {
- case <-w.ctx.Done():
- if err == nil {
- return nil, w.ctx.Err()
- }
- return nil, err
- default:
- }
- if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
- break
- }
- if isHaltErr(w.ctx, err) {
- return nil, v3rpc.Error(err)
- }
- if isUnavailableErr(w.ctx, err) {
- // retry, but backoff
- if backoff < maxBackoff {
- // 25% backoff factor
- backoff = backoff + backoff/4
- if backoff > maxBackoff {
- backoff = maxBackoff
- }
- }
- time.Sleep(backoff)
- }
- }
- return ws, nil
-}
-
-// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
-func (wr *watchRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchCreateRequest{
- StartRevision: wr.rev,
- Key: []byte(wr.key),
- RangeEnd: []byte(wr.end),
- ProgressNotify: wr.progressNotify,
- Filters: wr.filters,
- PrevKv: wr.prevKV,
- }
- cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-func streamKeyFromCtx(ctx context.Context) string {
- if md, ok := metadata.FromOutgoingContext(ctx); ok {
- return fmt.Sprintf("%+v", md)
- }
- return ""
-}
diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go
deleted file mode 100644
index 8100b69..0000000
--- a/vendor/github.com/coreos/etcd/compactor/compactor.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compactor
-
-import (
- "context"
- "fmt"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor")
-)
-
-const (
- ModePeriodic = "periodic"
- ModeRevision = "revision"
-)
-
-// Compactor purges old log from the storage periodically.
-type Compactor interface {
- // Run starts the main loop of the compactor in background.
- // Use Stop() to halt the loop and release the resource.
- Run()
- // Stop halts the main loop of the compactor.
- Stop()
-	// Pause temporarily suspends the compactor so that it does not run compaction. Call Resume() to unpause.
- Pause()
- // Resume restarts the compactor suspended by Pause().
- Resume()
-}
-
-type Compactable interface {
- Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
-}
-
-type RevGetter interface {
- Rev() int64
-}
-
-func New(mode string, retention time.Duration, rg RevGetter, c Compactable) (Compactor, error) {
- switch mode {
- case ModePeriodic:
- return NewPeriodic(retention, rg, c), nil
- case ModeRevision:
- return NewRevision(int64(retention), rg, c), nil
- default:
- return nil, fmt.Errorf("unsupported compaction mode %s", mode)
- }
-}
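For context on what this change drops from the vendor tree: the removed compactor package is driven through the small factory above, keyed on the mode string. The sketch below is illustrative only (the stub RevGetter/Compactable implementations are placeholders for the server's MVCC store, not part of this change):
```
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/compactor"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// stubRev and stubCompactable stand in for the embedding server's MVCC store.
type stubRev struct{ rev int64 }

func (s stubRev) Rev() int64 { return s.rev }

type stubCompactable struct{}

func (stubCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	return &pb.CompactionResponse{}, nil
}

func main() {
	// "periodic" compacts by retention time; "revision" keeps the last N revisions.
	c, err := compactor.New(compactor.ModePeriodic, 2*time.Hour, stubRev{rev: 100}, stubCompactable{})
	if err != nil {
		log.Fatal(err)
	}
	c.Run()        // starts the background compaction loop
	defer c.Stop() // halts the loop on shutdown
	time.Sleep(100 * time.Millisecond)
}
```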
diff --git a/vendor/github.com/coreos/etcd/compactor/doc.go b/vendor/github.com/coreos/etcd/compactor/doc.go
deleted file mode 100644
index cb15834..0000000
--- a/vendor/github.com/coreos/etcd/compactor/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package compactor implements automated policies for compacting etcd's mvcc storage.
-package compactor
diff --git a/vendor/github.com/coreos/etcd/compactor/periodic.go b/vendor/github.com/coreos/etcd/compactor/periodic.go
deleted file mode 100644
index 9d9164e..0000000
--- a/vendor/github.com/coreos/etcd/compactor/periodic.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compactor
-
-import (
- "context"
- "sync"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
-
- "github.com/jonboulle/clockwork"
-)
-
-// Periodic compacts the log by purging revisions older than
-// the configured retention time.
-type Periodic struct {
- clock clockwork.Clock
- period time.Duration
-
- rg RevGetter
- c Compactable
-
- revs []int64
- ctx context.Context
- cancel context.CancelFunc
-
- // mu protects paused
- mu sync.RWMutex
- paused bool
-}
-
-// NewPeriodic creates a new instance of Periodic compactor that purges
-// the log older than h Duration.
-func NewPeriodic(h time.Duration, rg RevGetter, c Compactable) *Periodic {
- return newPeriodic(clockwork.NewRealClock(), h, rg, c)
-}
-
-func newPeriodic(clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic {
- t := &Periodic{
- clock: clock,
- period: h,
- rg: rg,
- c: c,
- revs: make([]int64, 0),
- }
- t.ctx, t.cancel = context.WithCancel(context.Background())
- return t
-}
-
-/*
-Compaction period 1-hour:
- 1. compute compaction period, which is 1-hour
- 2. record revisions for every 1/10 of 1-hour (6-minute)
- 3. keep recording revisions with no compaction for first 1-hour
- 4. do compact with revs[0]
-	- success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
-
-Compaction period 24-hour:
- 1. compute compaction period, which is 1-hour
- 2. record revisions for every 1/10 of 1-hour (6-minute)
- 3. keep recording revisions with no compaction for first 24-hour
- 4. do compact with revs[0]
-	- success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
-
-Compaction period 59-min:
- 1. compute compaction period, which is 59-min
- 2. record revisions for every 1/10 of 59-min (5.9-min)
- 3. keep recording revisions with no compaction for first 59-min
- 4. do compact with revs[0]
-	- success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 59-min (5.9-min)
-
-Compaction period 5-sec:
- 1. compute compaction period, which is 5-sec
- 2. record revisions for every 1/10 of 5-sec (0.5-sec)
- 3. keep recording revisions with no compaction for first 5-sec
- 4. do compact with revs[0]
-	- success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
-*/
-
-// Run runs periodic compactor.
-func (t *Periodic) Run() {
- compactInterval := t.getCompactInterval()
- retryInterval := t.getRetryInterval()
- retentions := t.getRetentions()
-
- go func() {
- lastSuccess := t.clock.Now()
- baseInterval := t.period
- for {
- t.revs = append(t.revs, t.rg.Rev())
- if len(t.revs) > retentions {
- t.revs = t.revs[1:] // t.revs[0] is always the rev at t.period ago
- }
-
- select {
- case <-t.ctx.Done():
- return
- case <-t.clock.After(retryInterval):
- t.mu.Lock()
- p := t.paused
- t.mu.Unlock()
- if p {
- continue
- }
- }
-
- if t.clock.Now().Sub(lastSuccess) < baseInterval {
- continue
- }
-
- // wait up to initial given period
- if baseInterval == t.period {
- baseInterval = compactInterval
- }
- rev := t.revs[0]
-
- plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, t.period)
- _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
- if err == nil || err == mvcc.ErrCompacted {
- lastSuccess = t.clock.Now()
- plog.Noticef("Finished auto-compaction at revision %d", rev)
- } else {
- plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
- plog.Noticef("Retry after %v", retryInterval)
- }
- }
- }()
-}
-
-// if given compaction period x is <1-hour, compact every x duration.
-// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='10m', then compact every 10-minute)
-// if given compaction period x is >1-hour, compact every hour.
-// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='2h', then compact every 1-hour)
-func (t *Periodic) getCompactInterval() time.Duration {
- itv := t.period
- if itv > time.Hour {
- itv = time.Hour
- }
- return itv
-}
-
-func (t *Periodic) getRetentions() int {
- return int(t.period/t.getRetryInterval()) + 1
-}
-
-const retryDivisor = 10
-
-func (t *Periodic) getRetryInterval() time.Duration {
- itv := t.period
- if itv > time.Hour {
- itv = time.Hour
- }
- return itv / retryDivisor
-}
-
-// Stop stops periodic compactor.
-func (t *Periodic) Stop() {
- t.cancel()
-}
-
-// Pause pauses periodic compactor.
-func (t *Periodic) Pause() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = true
-}
-
-// Resume resumes periodic compactor.
-func (t *Periodic) Resume() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = false
-}
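As a quick aid for reading the sliding-window comment in the file above, the interval arithmetic for a 24-hour retention works out as follows. The helpers are unexported, so this standalone sketch mirrors their math rather than calling them:
```
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors getCompactInterval/getRetryInterval/getRetentions from the
	// removed periodic.go for a 24h retention period.
	period := 24 * time.Hour

	compactInterval := period
	if compactInterval > time.Hour {
		compactInterval = time.Hour // compaction runs at most hourly
	}
	retryInterval := compactInterval / 10       // revisions sampled every 6 minutes
	retentions := int(period/retryInterval) + 1 // 241 recorded revisions

	fmt.Println(compactInterval, retryInterval, retentions) // 1h0m0s 6m0s 241
}
```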
diff --git a/vendor/github.com/coreos/etcd/compactor/revision.go b/vendor/github.com/coreos/etcd/compactor/revision.go
deleted file mode 100644
index 927e41c..0000000
--- a/vendor/github.com/coreos/etcd/compactor/revision.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compactor
-
-import (
- "context"
- "sync"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
-
- "github.com/jonboulle/clockwork"
-)
-
-// Revision compacts the log by purging revisions older than
-// the configured revision number. Compaction happens every 5 minutes.
-type Revision struct {
- clock clockwork.Clock
- retention int64
-
- rg RevGetter
- c Compactable
-
- ctx context.Context
- cancel context.CancelFunc
-
- mu sync.Mutex
- paused bool
-}
-
-// NewRevision creates a new instance of the Revision compactor that purges
-// the log older than retention revisions from the current revision.
-func NewRevision(retention int64, rg RevGetter, c Compactable) *Revision {
- return newRevision(clockwork.NewRealClock(), retention, rg, c)
-}
-
-func newRevision(clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
- t := &Revision{
- clock: clock,
- retention: retention,
- rg: rg,
- c: c,
- }
- t.ctx, t.cancel = context.WithCancel(context.Background())
- return t
-}
-
-const revInterval = 5 * time.Minute
-
-// Run runs revision-based compactor.
-func (t *Revision) Run() {
- prev := int64(0)
- go func() {
- for {
- select {
- case <-t.ctx.Done():
- return
- case <-t.clock.After(revInterval):
- t.mu.Lock()
- p := t.paused
- t.mu.Unlock()
- if p {
- continue
- }
- }
-
- rev := t.rg.Rev() - t.retention
- if rev <= 0 || rev == prev {
- continue
- }
-
- plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, t.retention)
- _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
- if err == nil || err == mvcc.ErrCompacted {
- prev = rev
- plog.Noticef("Finished auto-compaction at revision %d", rev)
- } else {
- plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
- plog.Noticef("Retry after %v", revInterval)
- }
- }
- }()
-}
-
-// Stop stops revision-based compactor.
-func (t *Revision) Stop() {
- t.cancel()
-}
-
-// Pause pauses revision-based compactor.
-func (t *Revision) Pause() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = true
-}
-
-// Resume resumes revision-based compactor.
-func (t *Revision) Resume() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = false
-}
diff --git a/vendor/github.com/coreos/etcd/cover b/vendor/github.com/coreos/etcd/cover
deleted file mode 100755
index b7ad391..0000000
--- a/vendor/github.com/coreos/etcd/cover
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-# Generate coverage HTML for a package
-# e.g. PKG=./unit ./cover
-#
-set -e
-
-if [ -z "$PKG" ]; then
- echo "cover only works with a single package, sorry"
- exit 255
-fi
-
-COVEROUT="coverage"
-
-if ! [ -d "$COVEROUT" ]; then
- mkdir "$COVEROUT"
-fi
-
-# strip leading dot/slash and trailing slash and sanitize other slashes
-# e.g. ./etcdserver/etcdhttp/ ==> etcdserver_etcdhttp
-COVERPKG=${PKG/#./}
-COVERPKG=${COVERPKG/#\//}
-COVERPKG=${COVERPKG/%\//}
-COVERPKG=${COVERPKG//\//_}
-
-# generate arg for "go test"
-export COVER="-coverprofile ${COVEROUT}/${COVERPKG}.out"
-
-source ./test
-
-go tool cover -html=${COVEROUT}/${COVERPKG}.out
diff --git a/vendor/github.com/coreos/etcd/discovery/discovery.go b/vendor/github.com/coreos/etcd/discovery/discovery.go
deleted file mode 100644
index 7d1fa0d..0000000
--- a/vendor/github.com/coreos/etcd/discovery/discovery.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package discovery provides an implementation of the cluster discovery that
-// is used by etcd.
-package discovery
-
-import (
- "context"
- "errors"
- "fmt"
- "math"
- "net/http"
- "net/url"
- "path"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/coreos/etcd/client"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/pkg/capnslog"
- "github.com/jonboulle/clockwork"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery")
-
- ErrInvalidURL = errors.New("discovery: invalid URL")
- ErrBadSizeKey = errors.New("discovery: size key is bad")
- ErrSizeNotFound = errors.New("discovery: size key not found")
- ErrTokenNotFound = errors.New("discovery: token not found")
- ErrDuplicateID = errors.New("discovery: found duplicate id")
- ErrDuplicateName = errors.New("discovery: found duplicate name")
- ErrFullCluster = errors.New("discovery: cluster is full")
- ErrTooManyRetries = errors.New("discovery: too many retries")
- ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint")
-)
-
-var (
- // Number of retries discovery will attempt before giving up and erroring out.
- nRetries = uint(math.MaxUint32)
- maxExpoentialRetries = uint(8)
-)
-
-// JoinCluster will connect to the discovery service at the given url, and
-// register the server represented by the given id and config to the cluster
-func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, error) {
- d, err := newDiscovery(durl, dproxyurl, id)
- if err != nil {
- return "", err
- }
- return d.joinCluster(config)
-}
-
-// GetCluster will connect to the discovery service at the given url and
-// retrieve a string describing the cluster
-func GetCluster(durl, dproxyurl string) (string, error) {
- d, err := newDiscovery(durl, dproxyurl, 0)
- if err != nil {
- return "", err
- }
- return d.getCluster()
-}
-
-type discovery struct {
- cluster string
- id types.ID
- c client.KeysAPI
- retries uint
- url *url.URL
-
- clock clockwork.Clock
-}
-
-// newProxyFunc builds a proxy function from the given string, which should
-// represent a URL that can be used as a proxy. It performs basic
-// sanitization of the URL and returns any error encountered.
-func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) {
- if proxy == "" {
- return nil, nil
- }
- // Do a small amount of URL sanitization to help the user
- // Derived from net/http.ProxyFromEnvironment
- proxyURL, err := url.Parse(proxy)
- if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
- // proxy was bogus. Try prepending "http://" to it and
- // see if that parses correctly. If not, we ignore the
- // error and complain about the original one
- var err2 error
- proxyURL, err2 = url.Parse("http://" + proxy)
- if err2 == nil {
- err = nil
- }
- }
- if err != nil {
- return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
- }
-
- plog.Infof("using proxy %q", proxyURL.String())
- return http.ProxyURL(proxyURL), nil
-}
-
-func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
- u, err := url.Parse(durl)
- if err != nil {
- return nil, err
- }
- token := u.Path
- u.Path = ""
- pf, err := newProxyFunc(dproxyurl)
- if err != nil {
- return nil, err
- }
-
- // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
- tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
- if err != nil {
- return nil, err
- }
- tr.Proxy = pf
- cfg := client.Config{
- Transport: tr,
- Endpoints: []string{u.String()},
- }
- c, err := client.New(cfg)
- if err != nil {
- return nil, err
- }
- dc := client.NewKeysAPIWithPrefix(c, "")
- return &discovery{
- cluster: token,
- c: dc,
- id: id,
- url: u,
- clock: clockwork.NewRealClock(),
- }, nil
-}
-
-func (d *discovery) joinCluster(config string) (string, error) {
- // fast path: if the cluster is full, return the error
- // do not need to register to the cluster in this case.
- if _, _, _, err := d.checkCluster(); err != nil {
- return "", err
- }
-
- if err := d.createSelf(config); err != nil {
- // Fails, even on a timeout, if createSelf times out.
- // TODO(barakmich): Retrying the same node might want to succeed here
- // (ie, createSelf should be idempotent for discovery).
- return "", err
- }
-
- nodes, size, index, err := d.checkCluster()
- if err != nil {
- return "", err
- }
-
- all, err := d.waitNodes(nodes, size, index)
- if err != nil {
- return "", err
- }
-
- return nodesToCluster(all, size)
-}
-
-func (d *discovery) getCluster() (string, error) {
- nodes, size, index, err := d.checkCluster()
- if err != nil {
- if err == ErrFullCluster {
- return nodesToCluster(nodes, size)
- }
- return "", err
- }
-
- all, err := d.waitNodes(nodes, size, index)
- if err != nil {
- return "", err
- }
- return nodesToCluster(all, size)
-}
-
-func (d *discovery) createSelf(contents string) error {
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- resp, err := d.c.Create(ctx, d.selfKey(), contents)
- cancel()
- if err != nil {
- if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist {
- return ErrDuplicateID
- }
- return err
- }
-
- // ensure self appears on the server we connected to
- w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
- _, err = w.Next(context.Background())
- return err
-}
-
-func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
- configKey := path.Join("/", d.cluster, "_config")
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- // find cluster size
- resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
- cancel()
- if err != nil {
- if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
- return nil, 0, 0, ErrSizeNotFound
- }
- if err == client.ErrInvalidJSON {
- return nil, 0, 0, ErrBadDiscoveryEndpoint
- }
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.checkClusterRetry()
- }
- return nil, 0, 0, err
- }
- size, err := strconv.Atoi(resp.Node.Value)
- if err != nil {
- return nil, 0, 0, ErrBadSizeKey
- }
-
- ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- resp, err = d.c.Get(ctx, d.cluster, nil)
- cancel()
- if err != nil {
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.checkClusterRetry()
- }
- return nil, 0, 0, err
- }
- var nodes []*client.Node
- // append non-config keys to nodes
- for _, n := range resp.Node.Nodes {
- if !(path.Base(n.Key) == path.Base(configKey)) {
- nodes = append(nodes, n)
- }
- }
-
- snodes := sortableNodes{nodes}
- sort.Sort(snodes)
-
- // find self position
- for i := range nodes {
- if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
- break
- }
- if i >= size-1 {
- return nodes[:size], size, resp.Index, ErrFullCluster
- }
- }
- return nodes, size, resp.Index, nil
-}
-
-func (d *discovery) logAndBackoffForRetry(step string) {
- d.retries++
-	// logAndBackoffForRetry stops the exponential backoff once retries exceed maxExpoentialRetries and uses a constant backoff afterward.
- retries := d.retries
- if retries > maxExpoentialRetries {
- retries = maxExpoentialRetries
- }
- retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
- plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
- d.clock.Sleep(retryTimeInSecond)
-}
-
-func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
- if d.retries < nRetries {
- d.logAndBackoffForRetry("cluster status check")
- return d.checkCluster()
- }
- return nil, 0, 0, ErrTooManyRetries
-}
-
-func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
- if d.retries < nRetries {
- d.logAndBackoffForRetry("waiting for other nodes")
- nodes, n, index, err := d.checkCluster()
- if err != nil {
- return nil, err
- }
- return d.waitNodes(nodes, n, index)
- }
- return nil, ErrTooManyRetries
-}
-
-func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
- if len(nodes) > size {
- nodes = nodes[:size]
- }
- // watch from the next index
- w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
- all := make([]*client.Node, len(nodes))
- copy(all, nodes)
- for _, n := range all {
- if path.Base(n.Key) == path.Base(d.selfKey()) {
- plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
- } else {
- plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
- }
- }
-
- // wait for others
- for len(all) < size {
- plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
- resp, err := w.Next(context.Background())
- if err != nil {
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.waitNodesRetry()
- }
- return nil, err
- }
- plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
- all = append(all, resp.Node)
- }
- plog.Noticef("found %d needed peer(s)", len(all))
- return all, nil
-}
-
-func (d *discovery) selfKey() string {
- return path.Join("/", d.cluster, d.id.String())
-}
-
-func nodesToCluster(ns []*client.Node, size int) (string, error) {
- s := make([]string, len(ns))
- for i, n := range ns {
- s[i] = n.Value
- }
- us := strings.Join(s, ",")
- m, err := types.NewURLsMap(us)
- if err != nil {
- return us, ErrInvalidURL
- }
- if m.Len() != size {
- return us, ErrDuplicateName
- }
- return us, nil
-}
-
-type sortableNodes struct{ Nodes []*client.Node }
-
-func (ns sortableNodes) Len() int { return len(ns.Nodes) }
-func (ns sortableNodes) Less(i, j int) bool {
- return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
-}
-func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
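The removed discovery package boils down to two entry points, JoinCluster and GetCluster. A minimal sketch of how they are typically driven; the token URL, member ID, and peer URL below are placeholder values, not taken from this change:
```
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/discovery"
	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// Placeholder discovery token URL; a real one is minted by the discovery service.
	durl := "https://discovery.etcd.io/example-token"

	// Register this member under the token and wait until enough peers have
	// registered; the returned string is the initial-cluster map ("name=peerURL,...").
	cluster, err := discovery.JoinCluster(durl, "", types.ID(0x1), "infra1=http://10.0.0.1:2380")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("initial-cluster:", cluster)

	// A proxy does not register itself; it only reads the discovered cluster.
	peers, err := discovery.GetCluster(durl, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("discovered:", peers)
}
```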
diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/github.com/coreos/etcd/embed/config.go
deleted file mode 100644
index 835e051..0000000
--- a/vendor/github.com/coreos/etcd/embed/config.go
+++ /dev/null
@@ -1,699 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "crypto/tls"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/coreos/etcd/compactor"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/pkg/cors"
- "github.com/coreos/etcd/pkg/netutil"
- "github.com/coreos/etcd/pkg/srv"
- "github.com/coreos/etcd/pkg/tlsutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/pkg/capnslog"
- "github.com/ghodss/yaml"
- "google.golang.org/grpc"
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- ClusterStateFlagNew = "new"
- ClusterStateFlagExisting = "existing"
-
- DefaultName = "default"
- DefaultMaxSnapshots = 5
- DefaultMaxWALs = 5
- DefaultMaxTxnOps = uint(128)
- DefaultMaxRequestBytes = 1.5 * 1024 * 1024
- DefaultGRPCKeepAliveMinTime = 5 * time.Second
- DefaultGRPCKeepAliveInterval = 2 * time.Hour
- DefaultGRPCKeepAliveTimeout = 20 * time.Second
-
- DefaultListenPeerURLs = "http://localhost:2380"
- DefaultListenClientURLs = "http://localhost:2379"
-
- DefaultLogOutput = "default"
-
- // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
- // It's enabled by default.
- DefaultStrictReconfigCheck = true
- // DefaultEnableV2 is the default value for "--enable-v2" flag.
- // v2 is enabled by default.
- // TODO: disable v2 when deprecated.
- DefaultEnableV2 = true
-
- // maxElectionMs specifies the maximum value of election timeout.
- // More details are listed in ../Documentation/tuning.md#time-parameters.
- maxElectionMs = 50000
-)
-
-var (
- ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
- "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
- ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
-
- DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
- DefaultAdvertiseClientURLs = "http://localhost:2379"
-
- defaultHostname string
- defaultHostStatus error
-)
-
-func init() {
- defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
-}
-
-// Config holds the arguments for configuring an etcd server.
-type Config struct {
- // member
-
- CorsInfo *cors.CORSInfo
- LPUrls, LCUrls []url.URL
- Dir string `json:"data-dir"`
- WalDir string `json:"wal-dir"`
- MaxSnapFiles uint `json:"max-snapshots"`
- MaxWalFiles uint `json:"max-wals"`
- Name string `json:"name"`
- SnapCount uint64 `json:"snapshot-count"`
-
- // AutoCompactionMode is either 'periodic' or 'revision'.
- AutoCompactionMode string `json:"auto-compaction-mode"`
- // AutoCompactionRetention is either duration string with time unit
- // (e.g. '5m' for 5-minute), or revision unit (e.g. '5000').
- // If no time unit is provided and compaction mode is 'periodic',
- // the unit defaults to hour. For example, '5' translates into 5-hour.
- AutoCompactionRetention string `json:"auto-compaction-retention"`
-
- // TickMs is the number of milliseconds between heartbeat ticks.
- // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
- // make ticks a cluster wide configuration.
- TickMs uint `json:"heartbeat-interval"`
- ElectionMs uint `json:"election-timeout"`
-
-	// If InitialElectionTickAdvance is true, the local member fast-forwards
-	// election ticks to speed up the "initial" leader election trigger. This
-	// benefits the case of larger election ticks. For instance, a cross
-	// datacenter deployment may require a longer election timeout of 10-second.
-	// If true, the local node does not need to wait up to 10-second. Instead,
-	// it forwards its election ticks to 8-second, and has only 2-second left
-	// before leader election.
- //
- // Major assumptions are that:
- // - cluster has no active leader thus advancing ticks enables faster
- // leader election, or
- // - cluster already has an established leader, and rejoining follower
- // is likely to receive heartbeats from the leader after tick advance
- // and before election timeout.
- //
-	// However, when the network from the leader to a rejoining follower is
-	// congested, and the follower does not receive a leader heartbeat within
-	// the remaining election ticks, a disruptive election has to happen, thus
-	// affecting cluster availability.
- //
- // Disabling this would slow down initial bootstrap process for cross
- // datacenter deployments. Make your own tradeoffs by configuring
- // --initial-election-tick-advance at the cost of slow initial bootstrap.
- //
- // If single-node, it advances ticks regardless.
- //
- // See https://github.com/coreos/etcd/issues/9333 for more detail.
- InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
-
- QuotaBackendBytes int64 `json:"quota-backend-bytes"`
- MaxTxnOps uint `json:"max-txn-ops"`
- MaxRequestBytes uint `json:"max-request-bytes"`
-
- // gRPC server options
-
- // GRPCKeepAliveMinTime is the minimum interval that a client should
- // wait before pinging server. When client pings "too fast", server
- // sends goaway and closes the connection (errors: too_many_pings,
- // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
- // Server expects client pings only when there is any active streams
- // (PermitWithoutStream is set false).
- GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
- // GRPCKeepAliveInterval is the frequency of server-to-client ping
- // to check if a connection is alive. Close a non-responsive connection
- // after an additional duration of Timeout. 0 to disable.
- GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
- // GRPCKeepAliveTimeout is the additional duration of wait
- // before closing a non-responsive connection. 0 to disable.
- GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
-
- // clustering
-
- APUrls, ACUrls []url.URL
- ClusterState string `json:"initial-cluster-state"`
- DNSCluster string `json:"discovery-srv"`
- Dproxy string `json:"discovery-proxy"`
- Durl string `json:"discovery"`
- InitialCluster string `json:"initial-cluster"`
- InitialClusterToken string `json:"initial-cluster-token"`
- StrictReconfigCheck bool `json:"strict-reconfig-check"`
- EnableV2 bool `json:"enable-v2"`
-
- // security
-
- ClientTLSInfo transport.TLSInfo
- ClientAutoTLS bool
- PeerTLSInfo transport.TLSInfo
- PeerAutoTLS bool
-
- // CipherSuites is a list of supported TLS cipher suites between
- // client/server and peers. If empty, Go auto-populates the list.
- // Note that cipher suites are prioritized in the given order.
- CipherSuites []string `json:"cipher-suites"`
-
- // debug
-
- Debug bool `json:"debug"`
- LogPkgLevels string `json:"log-package-levels"`
- LogOutput string `json:"log-output"`
- EnablePprof bool `json:"enable-pprof"`
- Metrics string `json:"metrics"`
- ListenMetricsUrls []url.URL
- ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
-
- // ForceNewCluster starts a new cluster even if previously started; unsafe.
- ForceNewCluster bool `json:"force-new-cluster"`
-
-	// UserHandlers is for registering user handlers and is only used when
-	// embedding etcd into other applications.
-	// The map key is the route path for the handler, and
-	// you must ensure it does not conflict with etcd's own routes.
- UserHandlers map[string]http.Handler `json:"-"`
- // ServiceRegister is for registering users' gRPC services. A simple usage example:
- // cfg := embed.NewConfig()
-	//	cfg.ServiceRegister = func(s *grpc.Server) {
- // pb.RegisterFooServer(s, &fooServer{})
- // pb.RegisterBarServer(s, &barServer{})
- // }
- // embed.StartEtcd(cfg)
- ServiceRegister func(*grpc.Server) `json:"-"`
-
- // auth
-
- AuthToken string `json:"auth-token"`
-
- // Experimental flags
-
- ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
- ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"`
- ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
-}
-
-// configYAML holds the config suitable for yaml parsing
-type configYAML struct {
- Config
- configJSON
-}
-
-// configJSON has file options that are translated into Config options
-type configJSON struct {
- LPUrlsJSON string `json:"listen-peer-urls"`
- LCUrlsJSON string `json:"listen-client-urls"`
- CorsJSON string `json:"cors"`
- APUrlsJSON string `json:"initial-advertise-peer-urls"`
- ACUrlsJSON string `json:"advertise-client-urls"`
- ClientSecurityJSON securityConfig `json:"client-transport-security"`
- PeerSecurityJSON securityConfig `json:"peer-transport-security"`
-}
-
-type securityConfig struct {
- CAFile string `json:"ca-file"`
- CertFile string `json:"cert-file"`
- KeyFile string `json:"key-file"`
- CertAuth bool `json:"client-cert-auth"`
- TrustedCAFile string `json:"trusted-ca-file"`
- AutoTLS bool `json:"auto-tls"`
-}
-
-// NewConfig creates a new Config populated with default values.
-func NewConfig() *Config {
- lpurl, _ := url.Parse(DefaultListenPeerURLs)
- apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
- lcurl, _ := url.Parse(DefaultListenClientURLs)
- acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
- cfg := &Config{
- CorsInfo: &cors.CORSInfo{},
- MaxSnapFiles: DefaultMaxSnapshots,
- MaxWalFiles: DefaultMaxWALs,
- Name: DefaultName,
- SnapCount: etcdserver.DefaultSnapCount,
- MaxTxnOps: DefaultMaxTxnOps,
- MaxRequestBytes: DefaultMaxRequestBytes,
- GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
- GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
- GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
- TickMs: 100,
- ElectionMs: 1000,
- InitialElectionTickAdvance: true,
- LPUrls: []url.URL{*lpurl},
- LCUrls: []url.URL{*lcurl},
- APUrls: []url.URL{*apurl},
- ACUrls: []url.URL{*acurl},
- ClusterState: ClusterStateFlagNew,
- InitialClusterToken: "etcd-cluster",
- StrictReconfigCheck: DefaultStrictReconfigCheck,
- LogOutput: DefaultLogOutput,
- Metrics: "basic",
- EnableV2: DefaultEnableV2,
- AuthToken: "simple",
- }
- cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
- return cfg
-}
-
-func logTLSHandshakeFailure(conn *tls.Conn, err error) {
- state := conn.ConnectionState()
- remoteAddr := conn.RemoteAddr().String()
- serverName := state.ServerName
- if len(state.PeerCertificates) > 0 {
- cert := state.PeerCertificates[0]
- ips, dns := cert.IPAddresses, cert.DNSNames
- plog.Infof("rejected connection from %q (error %q, ServerName %q, IPAddresses %q, DNSNames %q)", remoteAddr, err.Error(), serverName, ips, dns)
- } else {
- plog.Infof("rejected connection from %q (error %q, ServerName %q)", remoteAddr, err.Error(), serverName)
- }
-}
-
-// SetupLogging initializes etcd logging.
-// Must be called after flag parsing.
-func (cfg *Config) SetupLogging() {
- cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
- cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
-
- capnslog.SetGlobalLogLevel(capnslog.INFO)
- if cfg.Debug {
- capnslog.SetGlobalLogLevel(capnslog.DEBUG)
- grpc.EnableTracing = true
- // enable info, warning, error
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
- } else {
- // only discard info
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
- }
- if cfg.LogPkgLevels != "" {
- repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd")
- settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
- if err != nil {
- plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
- return
- }
- repoLog.SetLogLevel(settings)
- }
-
- // capnslog initially SetFormatter(NewDefaultFormatter(os.Stderr))
- // where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1
- // specify 'stdout' or 'stderr' to skip journald logging even when running under systemd
- switch cfg.LogOutput {
- case "stdout":
- capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
- case "stderr":
- capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
- case DefaultLogOutput:
- default:
- plog.Panicf(`unknown log-output %q (only supports %q, "stdout", "stderr")`, cfg.LogOutput, DefaultLogOutput)
- }
-}
-
-func ConfigFromFile(path string) (*Config, error) {
- cfg := &configYAML{Config: *NewConfig()}
- if err := cfg.configFromFile(path); err != nil {
- return nil, err
- }
- return &cfg.Config, nil
-}
-
-func (cfg *configYAML) configFromFile(path string) error {
- b, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
-
- defaultInitialCluster := cfg.InitialCluster
-
- err = yaml.Unmarshal(b, cfg)
- if err != nil {
- return err
- }
-
- if cfg.LPUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err)
- }
- cfg.LPUrls = []url.URL(u)
- }
-
- if cfg.LCUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-client-urls: %v", err)
- }
- cfg.LCUrls = []url.URL(u)
- }
-
- if cfg.CorsJSON != "" {
- if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil {
- plog.Panicf("unexpected error setting up cors: %v", err)
- }
- }
-
- if cfg.APUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err)
- }
- cfg.APUrls = []url.URL(u)
- }
-
- if cfg.ACUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err)
- }
- cfg.ACUrls = []url.URL(u)
- }
-
- if cfg.ListenMetricsUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
- }
- cfg.ListenMetricsUrls = []url.URL(u)
- }
-
- // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName
- if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
- cfg.InitialCluster = ""
- }
- if cfg.ClusterState == "" {
- cfg.ClusterState = ClusterStateFlagNew
- }
-
- copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
- tls.CAFile = ysc.CAFile
- tls.CertFile = ysc.CertFile
- tls.KeyFile = ysc.KeyFile
- tls.ClientCertAuth = ysc.CertAuth
- tls.TrustedCAFile = ysc.TrustedCAFile
- }
- copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
- copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
- cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
- cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
-
- return cfg.Validate()
-}
-
-func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
- if len(tls.CipherSuites) > 0 && len(ss) > 0 {
- return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
- }
- if len(ss) > 0 {
- cs := make([]uint16, len(ss))
- for i, s := range ss {
- var ok bool
- cs[i], ok = tlsutil.GetCipherSuite(s)
- if !ok {
- return fmt.Errorf("unexpected TLS cipher suite %q", s)
- }
- }
- tls.CipherSuites = cs
- }
- return nil
-}
-
-// Validate ensures that '*embed.Config' fields are properly configured.
-func (cfg *Config) Validate() error {
- if err := checkBindURLs(cfg.LPUrls); err != nil {
- return err
- }
- if err := checkBindURLs(cfg.LCUrls); err != nil {
- return err
- }
- if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
- return err
- }
- if err := checkHostURLs(cfg.APUrls); err != nil {
- // TODO: return err in v3.4
- addrs := make([]string, len(cfg.APUrls))
- for i := range cfg.APUrls {
- addrs[i] = cfg.APUrls[i].String()
- }
- plog.Warningf("advertise-peer-urls %q is deprecated (%v)", strings.Join(addrs, ","), err)
- }
- if err := checkHostURLs(cfg.ACUrls); err != nil {
- // TODO: return err in v3.4
- addrs := make([]string, len(cfg.ACUrls))
- for i := range cfg.ACUrls {
- addrs[i] = cfg.ACUrls[i].String()
- }
- plog.Warningf("advertise-client-urls %q is deprecated (%v)", strings.Join(addrs, ","), err)
- }
-
- // Check if conflicting flags are passed.
- nSet := 0
- for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
- if v {
- nSet++
- }
- }
-
- if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
- return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
- }
-
- if nSet > 1 {
- return ErrConflictBootstrapFlags
- }
-
- if cfg.TickMs <= 0 {
- return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
- }
- if cfg.ElectionMs <= 0 {
- return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs)
- }
- if 5*cfg.TickMs > cfg.ElectionMs {
-		return fmt.Errorf("--election-timeout[%vms] should be at least 5 times --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
- }
- if cfg.ElectionMs > maxElectionMs {
- return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs)
- }
-
- // check this last since proxying in etcdmain may make this OK
- if cfg.LCUrls != nil && cfg.ACUrls == nil {
- return ErrUnsetAdvertiseClientURLsFlag
- }
-
- switch cfg.AutoCompactionMode {
- case "":
- case compactor.ModeRevision, compactor.ModePeriodic:
- default:
- return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
- }
-
- return nil
-}
-
-// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
-func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
- token = cfg.InitialClusterToken
- switch {
- case cfg.Durl != "":
- urlsmap = types.URLsMap{}
- // If using discovery, generate a temporary cluster based on
- // self's advertised peer URLs
- urlsmap[cfg.Name] = cfg.APUrls
- token = cfg.Durl
- case cfg.DNSCluster != "":
- clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls)
- if cerr != nil {
- plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
- return nil, "", cerr
- }
- for _, s := range clusterStrs {
- plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
- }
- clusterStr := strings.Join(clusterStrs, ",")
- if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" {
- cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
- }
- urlsmap, err = types.NewURLsMap(clusterStr)
- // only etcd member must belong to the discovered cluster.
- // proxy does not need to belong to the discovered cluster.
- if which == "etcd" {
- if _, ok := urlsmap[cfg.Name]; !ok {
- return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
- }
- }
- default:
- // We're statically configured, and cluster has appropriately been set.
- urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
- }
- return urlsmap, token, err
-}
-
-func (cfg Config) InitialClusterFromName(name string) (ret string) {
- if len(cfg.APUrls) == 0 {
- return ""
- }
- n := name
- if name == "" {
- n = DefaultName
- }
- for i := range cfg.APUrls {
- ret = ret + "," + n + "=" + cfg.APUrls[i].String()
- }
- return ret[1:]
-}
-
-func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
-func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
-
-func (cfg Config) defaultPeerHost() bool {
- return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
-}
-
-func (cfg Config) defaultClientHost() bool {
- return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
-}
-
-func (cfg *Config) ClientSelfCert() (err error) {
- if !cfg.ClientAutoTLS {
- return nil
- }
- if !cfg.ClientTLSInfo.Empty() {
- plog.Warningf("ignoring client auto TLS since certs given")
- return nil
- }
- chosts := make([]string, len(cfg.LCUrls))
- for i, u := range cfg.LCUrls {
- chosts[i] = u.Host
- }
- cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
- if err != nil {
- return err
- }
- return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
-}
-
-func (cfg *Config) PeerSelfCert() (err error) {
- if !cfg.PeerAutoTLS {
- return nil
- }
- if !cfg.PeerTLSInfo.Empty() {
- plog.Warningf("ignoring peer auto TLS since certs given")
- return nil
- }
- phosts := make([]string, len(cfg.LPUrls))
- for i, u := range cfg.LPUrls {
- phosts[i] = u.Host
- }
- cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
- if err != nil {
- return err
- }
- return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
-}
-
-// UpdateDefaultClusterFromName updates the cluster advertise URLs with the default host, if available,
-// when the advertise URLs are the default values (localhost:2379,2380) AND the listen URL is 0.0.0.0.
-// e.g. advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380
-// then the advertise peer host would be updated with machine's default host,
-// while keeping the listen URL's port.
-// User can work around this by explicitly setting URL with 127.0.0.1.
-// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
-// TODO: check whether fields are set instead of whether fields have default value
-func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
- if defaultHostname == "" || defaultHostStatus != nil {
- // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
- if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
- cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
- }
- return "", defaultHostStatus
- }
-
- used := false
- pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
- if cfg.defaultPeerHost() && pip == "0.0.0.0" {
- cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
- used = true
- }
- // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
- if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
- cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
- }
-
- cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
- if cfg.defaultClientHost() && cip == "0.0.0.0" {
- cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
- used = true
- }
- dhost := defaultHostname
- if !used {
- dhost = ""
- }
- return dhost, defaultHostStatus
-}
-
-// checkBindURLs returns an error if any URL uses a domain name.
-func checkBindURLs(urls []url.URL) error {
- for _, url := range urls {
- if url.Scheme == "unix" || url.Scheme == "unixs" {
- continue
- }
- host, _, err := net.SplitHostPort(url.Host)
- if err != nil {
- return err
- }
- if host == "localhost" {
- // special case for local address
- // TODO: support /etc/hosts ?
- continue
- }
- if net.ParseIP(host) == nil {
- return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
- }
- }
- return nil
-}
-
-func checkHostURLs(urls []url.URL) error {
- for _, url := range urls {
- host, _, err := net.SplitHostPort(url.Host)
- if err != nil {
- return err
- }
- if host == "" {
- return fmt.Errorf("unexpected empty host (%s)", url.String())
- }
- }
- return nil
-}
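Tying the removed packages together: embed.Config carries the auto-compaction settings that Validate checks against the compactor modes. A minimal sketch of building and validating such a config (the data directory and retention value are illustrative); the embed doc.go diff that follows shows the full StartEtcd flow:
```
package main

import (
	"log"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "/tmp/etcd-data" // placeholder data directory

	// AutoCompactionMode must be "", "periodic", or "revision";
	// anything else is rejected by Validate.
	cfg.AutoCompactionMode = "revision"
	cfg.AutoCompactionRetention = "1000" // keep the last 1000 revisions

	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid config: %v", err)
	}
	log.Println("config OK; pass it to embed.StartEtcd to launch a server")
}
```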
diff --git a/vendor/github.com/coreos/etcd/embed/doc.go b/vendor/github.com/coreos/etcd/embed/doc.go
deleted file mode 100644
index c555aa5..0000000
--- a/vendor/github.com/coreos/etcd/embed/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package embed provides bindings for embedding an etcd server in a program.
-
-Launch an embedded etcd server using the configuration defaults:
-
- import (
- "log"
- "time"
-
- "github.com/coreos/etcd/embed"
- )
-
- func main() {
- cfg := embed.NewConfig()
- cfg.Dir = "default.etcd"
- e, err := embed.StartEtcd(cfg)
- if err != nil {
- log.Fatal(err)
- }
- defer e.Close()
- select {
- case <-e.Server.ReadyNotify():
- log.Printf("Server is ready!")
- case <-time.After(60 * time.Second):
- e.Server.Stop() // trigger a shutdown
- log.Printf("Server took too long to start!")
- }
- log.Fatal(<-e.Err())
- }
-*/
-package embed
diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go
deleted file mode 100644
index bd848a7..0000000
--- a/vendor/github.com/coreos/etcd/embed/etcd.go
+++ /dev/null
@@ -1,582 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "io/ioutil"
- defaultLog "log"
- "net"
- "net/http"
- "net/url"
- "strconv"
- "sync"
- "time"
-
- "github.com/coreos/etcd/compactor"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http"
- "github.com/coreos/etcd/etcdserver/api/v2v3"
- "github.com/coreos/etcd/etcdserver/api/v3client"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- "github.com/coreos/etcd/pkg/cors"
- "github.com/coreos/etcd/pkg/debugutil"
- runtimeutil "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/rafthttp"
-
- "github.com/coreos/pkg/capnslog"
- grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
- "github.com/soheilhy/cmux"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
-)
-
-var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")
-
-const (
- // internal fd usage includes disk usage and transport usage.
- // To read/write snapshot, snap pkg needs 1. In normal case, wal pkg needs
-	// at most 2 to read/lock/write WALs. One case where it needs 2 is to
- // read all logs after some snapshot index, which locates at the end of
- // the second last and the head of the last. For purging, it needs to read
- // directory, so it needs 1. For fd monitor, it needs 1.
- // For transport, rafthttp builds two long-polling connections and at most
- // four temporary connections with each member. There are at most 9 members
- // in a cluster, so it should reserve 96.
-	// For safety, we set the total reserved number to 150.
- reservedInternalFDNum = 150
-)
-
-// Etcd contains a running etcd server and its listeners.
-type Etcd struct {
- Peers []*peerListener
- Clients []net.Listener
- // a map of contexts for the servers that serves client requests.
- sctxs map[string]*serveCtx
- metricsListeners []net.Listener
-
- Server *etcdserver.EtcdServer
-
- cfg Config
- stopc chan struct{}
- errc chan error
-
- closeOnce sync.Once
-}
-
-type peerListener struct {
- net.Listener
- serve func() error
- close func(context.Context) error
-}
-
-// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
-// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
-// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
-func StartEtcd(inCfg *Config) (e *Etcd, err error) {
- if err = inCfg.Validate(); err != nil {
- return nil, err
- }
- serving := false
- e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
- cfg := &e.cfg
- defer func() {
- if e == nil || err == nil {
- return
- }
- if !serving {
- // errored before starting gRPC server for serveCtx.serversC
- for _, sctx := range e.sctxs {
- close(sctx.serversC)
- }
- }
- e.Close()
- e = nil
- }()
-
- if e.Peers, err = startPeerListeners(cfg); err != nil {
- return e, err
- }
- if e.sctxs, err = startClientListeners(cfg); err != nil {
- return e, err
- }
- for _, sctx := range e.sctxs {
- e.Clients = append(e.Clients, sctx.l)
- }
-
- var (
- urlsmap types.URLsMap
- token string
- )
-
- memberInitialized := true
- if !isMemberInitialized(cfg) {
- memberInitialized = false
- urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
- if err != nil {
- return e, fmt.Errorf("error setting up initial cluster: %v", err)
- }
- }
-
- // AutoCompactionRetention defaults to "0" if not set.
- if len(cfg.AutoCompactionRetention) == 0 {
- cfg.AutoCompactionRetention = "0"
- }
- autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
- if err != nil {
- return e, err
- }
-
- srvcfg := etcdserver.ServerConfig{
- Name: cfg.Name,
- ClientURLs: cfg.ACUrls,
- PeerURLs: cfg.APUrls,
- DataDir: cfg.Dir,
- DedicatedWALDir: cfg.WalDir,
- SnapCount: cfg.SnapCount,
- MaxSnapFiles: cfg.MaxSnapFiles,
- MaxWALFiles: cfg.MaxWalFiles,
- InitialPeerURLsMap: urlsmap,
- InitialClusterToken: token,
- DiscoveryURL: cfg.Durl,
- DiscoveryProxy: cfg.Dproxy,
- NewCluster: cfg.IsNewCluster(),
- ForceNewCluster: cfg.ForceNewCluster,
- PeerTLSInfo: cfg.PeerTLSInfo,
- TickMs: cfg.TickMs,
- ElectionTicks: cfg.ElectionTicks(),
- InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
- AutoCompactionRetention: autoCompactionRetention,
- AutoCompactionMode: cfg.AutoCompactionMode,
- QuotaBackendBytes: cfg.QuotaBackendBytes,
- MaxTxnOps: cfg.MaxTxnOps,
- MaxRequestBytes: cfg.MaxRequestBytes,
- StrictReconfigCheck: cfg.StrictReconfigCheck,
- ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
- AuthToken: cfg.AuthToken,
- InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck,
- CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
- Debug: cfg.Debug,
- }
-
- if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
- return e, err
- }
-
- // buffer channel so goroutines on closed connections won't wait forever
- e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
-
- // newly started member ("memberInitialized==false")
- // does not need corruption check
- if memberInitialized {
- if err = e.Server.CheckInitialHashKV(); err != nil {
- // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
- // (nothing to close since rafthttp transports have not been started)
- e.Server = nil
- return e, err
- }
- }
- e.Server.Start()
-
- if err = e.servePeers(); err != nil {
- return e, err
- }
- if err = e.serveClients(); err != nil {
- return e, err
- }
- if err = e.serveMetrics(); err != nil {
- return e, err
- }
-
- serving = true
- return e, nil
-}
-
-// Config returns the current configuration.
-func (e *Etcd) Config() Config {
- return e.cfg
-}
-
-// Close gracefully shuts down all servers/listeners.
-// Client requests will be terminated with request timeout.
-// After the timeout, any remaining requests are forcibly closed.
-func (e *Etcd) Close() {
- e.closeOnce.Do(func() { close(e.stopc) })
-
- // close client requests with request timeout
- timeout := 2 * time.Second
- if e.Server != nil {
- timeout = e.Server.Cfg.ReqTimeout()
- }
- for _, sctx := range e.sctxs {
- for ss := range sctx.serversC {
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- stopServers(ctx, ss)
- cancel()
- }
- }
-
- for _, sctx := range e.sctxs {
- sctx.cancel()
- }
-
- for i := range e.Clients {
- if e.Clients[i] != nil {
- e.Clients[i].Close()
- }
- }
-
- for i := range e.metricsListeners {
- e.metricsListeners[i].Close()
- }
-
- // close rafthttp transports
- if e.Server != nil {
- e.Server.Stop()
- }
-
- // close all idle connections in peer handler (wait up to 1-second)
- for i := range e.Peers {
- if e.Peers[i] != nil && e.Peers[i].close != nil {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- e.Peers[i].close(ctx)
- cancel()
- }
- }
-}
-
-func stopServers(ctx context.Context, ss *servers) {
- shutdownNow := func() {
- // first, close the http.Server
- ss.http.Shutdown(ctx)
- // then close grpc.Server; cancels all active RPCs
- ss.grpc.Stop()
- }
-
- // do not grpc.Server.GracefulStop with TLS enabled etcd server
- // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
- // and https://github.com/coreos/etcd/issues/8916
- if ss.secure {
- shutdownNow()
- return
- }
-
- ch := make(chan struct{})
- go func() {
- defer close(ch)
- // close listeners to stop accepting new connections,
- // will block on any existing transports
- ss.grpc.GracefulStop()
- }()
-
- // wait until all pending RPCs are finished
- select {
- case <-ch:
- case <-ctx.Done():
- // took too long, manually close open transports
- // e.g. watch streams
- shutdownNow()
-
- // concurrent GracefulStop should be interrupted
- <-ch
- }
-}
-
-func (e *Etcd) Err() <-chan error { return e.errc }
-
-func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
- if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
- return nil, err
- }
- if err = cfg.PeerSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- if !cfg.PeerTLSInfo.Empty() {
- plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
- }
-
- peers = make([]*peerListener, len(cfg.LPUrls))
- defer func() {
- if err == nil {
- return
- }
- for i := range peers {
- if peers[i] != nil && peers[i].close != nil {
- plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- peers[i].close(ctx)
- cancel()
- }
- }
- }()
-
- for i, u := range cfg.LPUrls {
- if u.Scheme == "http" {
- if !cfg.PeerTLSInfo.Empty() {
- plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
- }
- if cfg.PeerTLSInfo.ClientCertAuth {
- plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
- }
- }
- peers[i] = &peerListener{close: func(context.Context) error { return nil }}
- peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
- if err != nil {
- return nil, err
- }
- // once serve, overwrite with 'http.Server.Shutdown'
- peers[i].close = func(context.Context) error {
- return peers[i].Listener.Close()
- }
- plog.Info("listening for peers on ", u.String())
- }
- return peers, nil
-}
-
-// configure peer handlers after rafthttp.Transport started
-func (e *Etcd) servePeers() (err error) {
- ph := etcdhttp.NewPeerHandler(e.Server)
- var peerTLScfg *tls.Config
- if !e.cfg.PeerTLSInfo.Empty() {
- if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
- return err
- }
- }
-
- for _, p := range e.Peers {
- gs := v3rpc.Server(e.Server, peerTLScfg)
- m := cmux.New(p.Listener)
- go gs.Serve(m.Match(cmux.HTTP2()))
- srv := &http.Server{
- Handler: grpcHandlerFunc(gs, ph),
- ReadTimeout: 5 * time.Minute,
- ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
- }
- go srv.Serve(m.Match(cmux.Any()))
- p.serve = func() error { return m.Serve() }
- p.close = func(ctx context.Context) error {
- // gracefully shutdown http.Server
- // close open listeners, idle connections
- // until context cancel or time-out
- stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
- return nil
- }
- }
-
- // start peer servers in a goroutine
- for _, pl := range e.Peers {
- go func(l *peerListener) {
- e.errHandler(l.serve())
- }(pl)
- }
- return nil
-}
-
-func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
- if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
- return nil, err
- }
- if err = cfg.ClientSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- if cfg.EnablePprof {
- plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
- }
-
- sctxs = make(map[string]*serveCtx)
- for _, u := range cfg.LCUrls {
- sctx := newServeCtx()
-
- if u.Scheme == "http" || u.Scheme == "unix" {
- if !cfg.ClientTLSInfo.Empty() {
- plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
- }
- if cfg.ClientTLSInfo.ClientCertAuth {
- plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
- }
- }
- if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
- return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
- }
-
- proto := "tcp"
- addr := u.Host
- if u.Scheme == "unix" || u.Scheme == "unixs" {
- proto = "unix"
- addr = u.Host + u.Path
- }
-
- sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
- sctx.insecure = !sctx.secure
- if oldctx := sctxs[addr]; oldctx != nil {
- oldctx.secure = oldctx.secure || sctx.secure
- oldctx.insecure = oldctx.insecure || sctx.insecure
- continue
- }
-
- if sctx.l, err = net.Listen(proto, addr); err != nil {
- return nil, err
- }
- // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
- // hosts that disable ipv6. So, use the address given by the user.
- sctx.addr = addr
-
- if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
- if fdLimit <= reservedInternalFDNum {
- plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
- }
- sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
- }
-
- if proto == "tcp" {
- if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil {
- return nil, err
- }
- }
-
- plog.Info("listening for client requests on ", u.Host)
- defer func() {
- if err != nil {
- sctx.l.Close()
- plog.Info("stopping listening for client requests on ", u.Host)
- }
- }()
- for k := range cfg.UserHandlers {
- sctx.userHandlers[k] = cfg.UserHandlers[k]
- }
- sctx.serviceRegister = cfg.ServiceRegister
- if cfg.EnablePprof || cfg.Debug {
- sctx.registerPprof()
- }
- if cfg.Debug {
- sctx.registerTrace()
- }
- sctxs[addr] = sctx
- }
- return sctxs, nil
-}
-
-func (e *Etcd) serveClients() (err error) {
- if !e.cfg.ClientTLSInfo.Empty() {
- plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
- }
-
- if e.cfg.CorsInfo.String() != "" {
- plog.Infof("cors = %s", e.cfg.CorsInfo)
- }
-
- // Start a client server goroutine for each listen address
- var h http.Handler
- if e.Config().EnableV2 {
- if len(e.Config().ExperimentalEnableV2V3) > 0 {
- srv := v2v3.NewServer(v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
- h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout())
- } else {
- h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
- }
- } else {
- mux := http.NewServeMux()
- etcdhttp.HandleBasic(mux, e.Server)
- h = mux
- }
- h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})
-
- gopts := []grpc.ServerOption{}
- if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
- gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
- MinTime: e.cfg.GRPCKeepAliveMinTime,
- PermitWithoutStream: false,
- }))
- }
- if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
- e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
- gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: e.cfg.GRPCKeepAliveInterval,
- Timeout: e.cfg.GRPCKeepAliveTimeout,
- }))
- }
-
- // start client servers in a goroutine
- for _, sctx := range e.sctxs {
- go func(s *serveCtx) {
- e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
- }(sctx)
- }
- return nil
-}
-
-func (e *Etcd) serveMetrics() (err error) {
- if e.cfg.Metrics == "extensive" {
- grpc_prometheus.EnableHandlingTimeHistogram()
- }
-
- if len(e.cfg.ListenMetricsUrls) > 0 {
- metricsMux := http.NewServeMux()
- etcdhttp.HandleMetricsHealth(metricsMux, e.Server)
-
- for _, murl := range e.cfg.ListenMetricsUrls {
- tlsInfo := &e.cfg.ClientTLSInfo
- if murl.Scheme == "http" {
- tlsInfo = nil
- }
- ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
- if err != nil {
- return err
- }
- e.metricsListeners = append(e.metricsListeners, ml)
- go func(u url.URL, ln net.Listener) {
- plog.Info("listening for metrics on ", u.String())
- e.errHandler(http.Serve(ln, metricsMux))
- }(murl, ml)
- }
- }
- return nil
-}
-
-func (e *Etcd) errHandler(err error) {
- select {
- case <-e.stopc:
- return
- default:
- }
- select {
- case <-e.stopc:
- case e.errc <- err:
- }
-}
-
-func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
- h, err := strconv.Atoi(retention)
- if err == nil {
- switch mode {
- case compactor.ModeRevision:
- ret = time.Duration(int64(h))
- case compactor.ModePeriodic:
- ret = time.Duration(int64(h)) * time.Hour
- }
- } else {
- // periodic compaction
- ret, err = time.ParseDuration(retention)
- if err != nil {
- return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
- }
- }
- return ret, nil
-}
diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go
deleted file mode 100644
index 62b8b57..0000000
--- a/vendor/github.com/coreos/etcd/embed/serve.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "context"
- "io/ioutil"
- defaultLog "log"
- "net"
- "net/http"
- "strings"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3client"
- "github.com/coreos/etcd/etcdserver/api/v3election"
- "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
- v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw"
- "github.com/coreos/etcd/etcdserver/api/v3lock"
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
- v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw"
- "github.com/coreos/etcd/pkg/debugutil"
- "github.com/coreos/etcd/pkg/transport"
-
- gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/soheilhy/cmux"
- "github.com/tmc/grpc-websocket-proxy/wsproxy"
- "golang.org/x/net/trace"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
-)
-
-type serveCtx struct {
- l net.Listener
- addr string
- secure bool
- insecure bool
-
- ctx context.Context
- cancel context.CancelFunc
-
- userHandlers map[string]http.Handler
- serviceRegister func(*grpc.Server)
- serversC chan *servers
-}
-
-type servers struct {
- secure bool
- grpc *grpc.Server
- http *http.Server
-}
-
-func newServeCtx() *serveCtx {
- ctx, cancel := context.WithCancel(context.Background())
- return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler),
- serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true
- }
-}
-
-// serve accepts incoming connections on the listener l,
-// creating a new service goroutine for each. The service goroutines
-// read requests and then call handler to reply to them.
-func (sctx *serveCtx) serve(
- s *etcdserver.EtcdServer,
- tlsinfo *transport.TLSInfo,
- handler http.Handler,
- errHandler func(error),
- gopts ...grpc.ServerOption) (err error) {
- logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
- <-s.ReadyNotify()
- plog.Info("ready to serve client requests")
-
- m := cmux.New(sctx.l)
- v3c := v3client.New(s)
- servElection := v3election.NewElectionServer(v3c)
- servLock := v3lock.NewLockServer(v3c)
-
- var gs *grpc.Server
- defer func() {
- if err != nil && gs != nil {
- gs.Stop()
- }
- }()
-
- if sctx.insecure {
- gs = v3rpc.Server(s, nil, gopts...)
- v3electionpb.RegisterElectionServer(gs, servElection)
- v3lockpb.RegisterLockServer(gs, servLock)
- if sctx.serviceRegister != nil {
- sctx.serviceRegister(gs)
- }
- grpcl := m.Match(cmux.HTTP2())
- go func() { errHandler(gs.Serve(grpcl)) }()
-
- var gwmux *gw.ServeMux
- gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
- if err != nil {
- return err
- }
-
- httpmux := sctx.createMux(gwmux, handler)
-
- srvhttp := &http.Server{
- Handler: wrapMux(httpmux),
- ErrorLog: logger, // do not log user error
- }
- httpl := m.Match(cmux.HTTP1())
- go func() { errHandler(srvhttp.Serve(httpl)) }()
-
- sctx.serversC <- &servers{grpc: gs, http: srvhttp}
- plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
- }
-
- if sctx.secure {
- tlscfg, tlsErr := tlsinfo.ServerConfig()
- if tlsErr != nil {
- return tlsErr
- }
- gs = v3rpc.Server(s, tlscfg, gopts...)
- v3electionpb.RegisterElectionServer(gs, servElection)
- v3lockpb.RegisterLockServer(gs, servLock)
- if sctx.serviceRegister != nil {
- sctx.serviceRegister(gs)
- }
- handler = grpcHandlerFunc(gs, handler)
-
- dtls := tlscfg.Clone()
- // trust local server
- dtls.InsecureSkipVerify = true
- creds := credentials.NewTLS(dtls)
- opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
- var gwmux *gw.ServeMux
- gwmux, err = sctx.registerGateway(opts)
- if err != nil {
- return err
- }
-
- var tlsl net.Listener
- tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
- if err != nil {
- return err
- }
- // TODO: add debug flag; enable logging when debug flag is set
- httpmux := sctx.createMux(gwmux, handler)
-
- srv := &http.Server{
- Handler: wrapMux(httpmux),
- TLSConfig: tlscfg,
- ErrorLog: logger, // do not log user error
- }
- go func() { errHandler(srv.Serve(tlsl)) }()
-
- sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
- plog.Infof("serving client requests on %s", sctx.l.Addr().String())
- }
-
- close(sctx.serversC)
- return m.Serve()
-}
-
-// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
-// connections or otherHandler otherwise. Given in gRPC docs.
-func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
- if otherHandler == nil {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- grpcServer.ServeHTTP(w, r)
- })
- }
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
- grpcServer.ServeHTTP(w, r)
- } else {
- otherHandler.ServeHTTP(w, r)
- }
- })
-}
-
-type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
-
-func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
- ctx := sctx.ctx
- conn, err := grpc.DialContext(ctx, sctx.addr, opts...)
- if err != nil {
- return nil, err
- }
- gwmux := gw.NewServeMux()
-
- handlers := []registerHandlerFunc{
- etcdservergw.RegisterKVHandler,
- etcdservergw.RegisterWatchHandler,
- etcdservergw.RegisterLeaseHandler,
- etcdservergw.RegisterClusterHandler,
- etcdservergw.RegisterMaintenanceHandler,
- etcdservergw.RegisterAuthHandler,
- v3lockgw.RegisterLockHandler,
- v3electiongw.RegisterElectionHandler,
- }
- for _, h := range handlers {
- if err := h(ctx, gwmux, conn); err != nil {
- return nil, err
- }
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
- }
- }()
-
- return gwmux, nil
-}
-
-func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
- httpmux := http.NewServeMux()
- for path, h := range sctx.userHandlers {
- httpmux.Handle(path, h)
- }
-
- httpmux.Handle(
- "/v3beta/",
- wsproxy.WebsocketProxy(
- gwmux,
- wsproxy.WithRequestMutator(
- // Default to the POST method for streams
- func(incoming *http.Request, outgoing *http.Request) *http.Request {
- outgoing.Method = "POST"
- return outgoing
- },
- ),
- ),
- )
- if handler != nil {
- httpmux.Handle("/", handler)
- }
- return httpmux
-}
-
-// wraps HTTP multiplexer to mute requests to /v3alpha
-// TODO: deprecate this in 3.4 release
-func wrapMux(mux *http.ServeMux) http.Handler { return &v3alphaMutator{mux: mux} }
-
-type v3alphaMutator struct {
- mux *http.ServeMux
-}
-
-func (m *v3alphaMutator) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3alpha/") {
- req.URL.Path = strings.Replace(req.URL.Path, "/v3alpha/", "/v3beta/", 1)
- }
- m.mux.ServeHTTP(rw, req)
-}
-
-func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
- if sctx.userHandlers[s] != nil {
- plog.Warningf("path %s already registered by user handler", s)
- return
- }
- sctx.userHandlers[s] = h
-}
-
-func (sctx *serveCtx) registerPprof() {
- for p, h := range debugutil.PProfHandlers() {
- sctx.registerUserHandler(p, h)
- }
-}
-
-func (sctx *serveCtx) registerTrace() {
- reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
- sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
- evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
- sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
-}
diff --git a/vendor/github.com/coreos/etcd/embed/util.go b/vendor/github.com/coreos/etcd/embed/util.go
deleted file mode 100644
index 168e031..0000000
--- a/vendor/github.com/coreos/etcd/embed/util.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "path/filepath"
-
- "github.com/coreos/etcd/wal"
-)
-
-func isMemberInitialized(cfg *Config) bool {
- waldir := cfg.WalDir
- if waldir == "" {
- waldir = filepath.Join(cfg.Dir, "member", "wal")
- }
-
- return wal.Exist(waldir)
-}
diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go
deleted file mode 100644
index b541a62..0000000
--- a/vendor/github.com/coreos/etcd/error/error.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package error describes errors in etcd project. When any change happens,
-// Documentation/v2/errorcode.md needs to be updated correspondingly.
-package error
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-)
-
-var errors = map[int]string{
- // command related errors
- EcodeKeyNotFound: "Key not found",
- EcodeTestFailed: "Compare failed", //test and set
- EcodeNotFile: "Not a file",
- ecodeNoMorePeer: "Reached the max number of peers in the cluster",
- EcodeNotDir: "Not a directory",
- EcodeNodeExist: "Key already exists", // create
- ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd",
- EcodeRootROnly: "Root is read only",
- EcodeDirNotEmpty: "Directory not empty",
- ecodeExistingPeerAddr: "Peer address has existed",
- EcodeUnauthorized: "The request requires user authentication",
-
- // Post form related errors
- ecodeValueRequired: "Value is Required in POST form",
- EcodePrevValueRequired: "PrevValue is Required in POST form",
- EcodeTTLNaN: "The given TTL in POST form is not a number",
- EcodeIndexNaN: "The given index in POST form is not a number",
- ecodeValueOrTTLRequired: "Value or TTL is required in POST form",
- ecodeTimeoutNaN: "The given timeout in POST form is not a number",
- ecodeNameRequired: "Name is required in POST form",
- ecodeIndexOrValueRequired: "Index or value is required",
- ecodeIndexValueMutex: "Index and value cannot both be specified",
- EcodeInvalidField: "Invalid field",
- EcodeInvalidForm: "Invalid POST form",
- EcodeRefreshValue: "Value provided on refresh",
- EcodeRefreshTTLRequired: "A TTL must be provided on refresh",
-
- // raft related errors
- EcodeRaftInternal: "Raft Internal Error",
- EcodeLeaderElect: "During Leader Election",
-
- // etcd related errors
- EcodeWatcherCleared: "watcher is cleared due to etcd recovery",
- EcodeEventIndexCleared: "The event in requested index is outdated and cleared",
- ecodeStandbyInternal: "Standby Internal Error",
- ecodeInvalidActiveSize: "Invalid active size",
- ecodeInvalidRemoveDelay: "Standby remove delay",
-
- // client related errors
- ecodeClientInternal: "Client Internal Error",
-}
-
-var errorStatus = map[int]int{
- EcodeKeyNotFound: http.StatusNotFound,
- EcodeNotFile: http.StatusForbidden,
- EcodeDirNotEmpty: http.StatusForbidden,
- EcodeUnauthorized: http.StatusUnauthorized,
- EcodeTestFailed: http.StatusPreconditionFailed,
- EcodeNodeExist: http.StatusPreconditionFailed,
- EcodeRaftInternal: http.StatusInternalServerError,
- EcodeLeaderElect: http.StatusInternalServerError,
-}
-
-const (
- EcodeKeyNotFound = 100
- EcodeTestFailed = 101
- EcodeNotFile = 102
- ecodeNoMorePeer = 103
- EcodeNotDir = 104
- EcodeNodeExist = 105
- ecodeKeyIsPreserved = 106
- EcodeRootROnly = 107
- EcodeDirNotEmpty = 108
- ecodeExistingPeerAddr = 109
- EcodeUnauthorized = 110
-
- ecodeValueRequired = 200
- EcodePrevValueRequired = 201
- EcodeTTLNaN = 202
- EcodeIndexNaN = 203
- ecodeValueOrTTLRequired = 204
- ecodeTimeoutNaN = 205
- ecodeNameRequired = 206
- ecodeIndexOrValueRequired = 207
- ecodeIndexValueMutex = 208
- EcodeInvalidField = 209
- EcodeInvalidForm = 210
- EcodeRefreshValue = 211
- EcodeRefreshTTLRequired = 212
-
- EcodeRaftInternal = 300
- EcodeLeaderElect = 301
-
- EcodeWatcherCleared = 400
- EcodeEventIndexCleared = 401
- ecodeStandbyInternal = 402
- ecodeInvalidActiveSize = 403
- ecodeInvalidRemoveDelay = 404
-
- ecodeClientInternal = 500
-)
-
-type Error struct {
- ErrorCode int `json:"errorCode"`
- Message string `json:"message"`
- Cause string `json:"cause,omitempty"`
- Index uint64 `json:"index"`
-}
-
-func NewRequestError(errorCode int, cause string) *Error {
- return NewError(errorCode, cause, 0)
-}
-
-func NewError(errorCode int, cause string, index uint64) *Error {
- return &Error{
- ErrorCode: errorCode,
- Message: errors[errorCode],
- Cause: cause,
- Index: index,
- }
-}
-
-// Error is for the error interface
-func (e Error) Error() string {
- return e.Message + " (" + e.Cause + ")"
-}
-
-func (e Error) toJsonString() string {
- b, _ := json.Marshal(e)
- return string(b)
-}
-
-func (e Error) StatusCode() int {
- status, ok := errorStatus[e.ErrorCode]
- if !ok {
- status = http.StatusBadRequest
- }
- return status
-}
-
-func (e Error) WriteTo(w http.ResponseWriter) error {
- w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(e.StatusCode())
- _, err := w.Write([]byte(e.toJsonString() + "\n"))
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/etcd.conf.yml.sample b/vendor/github.com/coreos/etcd/etcd.conf.yml.sample
deleted file mode 100644
index 2bc115f..0000000
--- a/vendor/github.com/coreos/etcd/etcd.conf.yml.sample
+++ /dev/null
@@ -1,144 +0,0 @@
-# This is the configuration file for the etcd server.
-
-# Human-readable name for this member.
-name: 'default'
-
-# Path to the data directory.
-data-dir:
-
-# Path to the dedicated wal directory.
-wal-dir:
-
-# Number of committed transactions to trigger a snapshot to disk.
-snapshot-count: 10000
-
-# Time (in milliseconds) of a heartbeat interval.
-heartbeat-interval: 100
-
-# Time (in milliseconds) for an election to timeout.
-election-timeout: 1000
-
-# Raise alarms when backend size exceeds the given quota. 0 means use the
-# default quota.
-quota-backend-bytes: 0
-
-# List of comma separated URLs to listen on for peer traffic.
-listen-peer-urls: http://localhost:2380
-
-# List of comma separated URLs to listen on for client traffic.
-listen-client-urls: http://localhost:2379
-
-# Maximum number of snapshot files to retain (0 is unlimited).
-max-snapshots: 5
-
-# Maximum number of wal files to retain (0 is unlimited).
-max-wals: 5
-
-# Comma-separated white list of origins for CORS (cross-origin resource sharing).
-cors:
-
-# List of this member's peer URLs to advertise to the rest of the cluster.
-# The URLs need to be a comma-separated list.
-initial-advertise-peer-urls: http://localhost:2380
-
-# List of this member's client URLs to advertise to the public.
-# The URLs need to be a comma-separated list.
-advertise-client-urls: http://localhost:2379
-
-# Discovery URL used to bootstrap the cluster.
-discovery:
-
-# Valid values include 'exit', 'proxy'
-discovery-fallback: 'proxy'
-
-# HTTP proxy to use for traffic to discovery service.
-discovery-proxy:
-
-# DNS domain used to bootstrap initial cluster.
-discovery-srv:
-
-# Initial cluster configuration for bootstrapping.
-initial-cluster:
-
-# Initial cluster token for the etcd cluster during bootstrap.
-initial-cluster-token: 'etcd-cluster'
-
-# Initial cluster state ('new' or 'existing').
-initial-cluster-state: 'new'
-
-# Reject reconfiguration requests that would cause quorum loss.
-strict-reconfig-check: false
-
-# Accept etcd V2 client requests
-enable-v2: true
-
-# Enable runtime profiling data via HTTP server
-enable-pprof: true
-
-# Valid values include 'on', 'readonly', 'off'
-proxy: 'off'
-
-# Time (in milliseconds) an endpoint will be held in a failed state.
-proxy-failure-wait: 5000
-
-# Time (in milliseconds) of the endpoints refresh interval.
-proxy-refresh-interval: 30000
-
-# Time (in milliseconds) for a dial to timeout.
-proxy-dial-timeout: 1000
-
-# Time (in milliseconds) for a write to timeout.
-proxy-write-timeout: 5000
-
-# Time (in milliseconds) for a read to timeout.
-proxy-read-timeout: 0
-
-client-transport-security:
- # DEPRECATED: Path to the client server TLS CA file.
- ca-file:
-
- # Path to the client server TLS cert file.
- cert-file:
-
- # Path to the client server TLS key file.
- key-file:
-
- # Enable client cert authentication.
- client-cert-auth: false
-
- # Path to the client server TLS trusted CA cert file.
- trusted-ca-file:
-
- # Client TLS using generated certificates
- auto-tls: false
-
-peer-transport-security:
- # DEPRECATED: Path to the peer server TLS CA file.
- ca-file:
-
- # Path to the peer server TLS cert file.
- cert-file:
-
- # Path to the peer server TLS key file.
- key-file:
-
- # Enable peer client cert authentication.
- peer-client-cert-auth: false
-
- # Path to the peer server TLS trusted CA cert file.
- trusted-ca-file:
-
- # Peer TLS using generated certificates.
- auto-tls: false
-
-# Enable debug-level logging for etcd.
-debug: false
-
-# Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
-log-package-levels:
-
-# Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
-log-output: default
-
-# Force to create a new one member cluster.
-force-new-cluster: false
diff --git a/vendor/github.com/coreos/etcd/etcdmain/config.go b/vendor/github.com/coreos/etcd/etcdmain/config.go
deleted file mode 100644
index 2a5faa7..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/config.go
+++ /dev/null
@@ -1,346 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Every change should be reflected on help.go as well.
-
-package etcdmain
-
-import (
- "flag"
- "fmt"
- "io/ioutil"
- "net/url"
- "os"
- "runtime"
- "strings"
-
- "github.com/coreos/etcd/embed"
- "github.com/coreos/etcd/pkg/flags"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/version"
-
- "github.com/ghodss/yaml"
-)
-
-var (
- proxyFlagOff = "off"
- proxyFlagReadonly = "readonly"
- proxyFlagOn = "on"
-
- fallbackFlagExit = "exit"
- fallbackFlagProxy = "proxy"
-
- ignored = []string{
- "cluster-active-size",
- "cluster-remove-delay",
- "cluster-sync-interval",
- "config",
- "force",
- "max-result-buffer",
- "max-retry-attempts",
- "peer-heartbeat-interval",
- "peer-election-timeout",
- "retry-interval",
- "snapshot",
- "v",
- "vv",
- // for coverage testing
- "test.coverprofile",
- "test.outputdir",
- }
-)
-
-type configProxy struct {
- ProxyFailureWaitMs uint `json:"proxy-failure-wait"`
- ProxyRefreshIntervalMs uint `json:"proxy-refresh-interval"`
- ProxyDialTimeoutMs uint `json:"proxy-dial-timeout"`
- ProxyWriteTimeoutMs uint `json:"proxy-write-timeout"`
- ProxyReadTimeoutMs uint `json:"proxy-read-timeout"`
- Fallback string
- Proxy string
- ProxyJSON string `json:"proxy"`
- FallbackJSON string `json:"discovery-fallback"`
-}
-
-// config holds the config for a command line invocation of etcd
-type config struct {
- ec embed.Config
- cp configProxy
- cf configFlags
- configFile string
- printVersion bool
- ignored []string
-}
-
-// configFlags has the set of flags used for command line parsing a Config
-type configFlags struct {
- flagSet *flag.FlagSet
- clusterState *flags.StringsFlag
- fallback *flags.StringsFlag
- proxy *flags.StringsFlag
-}
-
-func newConfig() *config {
- cfg := &config{
- ec: *embed.NewConfig(),
- cp: configProxy{
- Proxy: proxyFlagOff,
- ProxyFailureWaitMs: 5000,
- ProxyRefreshIntervalMs: 30000,
- ProxyDialTimeoutMs: 1000,
- ProxyWriteTimeoutMs: 5000,
- },
- ignored: ignored,
- }
- cfg.cf = configFlags{
- flagSet: flag.NewFlagSet("etcd", flag.ContinueOnError),
- clusterState: flags.NewStringsFlag(
- embed.ClusterStateFlagNew,
- embed.ClusterStateFlagExisting,
- ),
- fallback: flags.NewStringsFlag(
- fallbackFlagProxy,
- fallbackFlagExit,
- ),
- proxy: flags.NewStringsFlag(
- proxyFlagOff,
- proxyFlagReadonly,
- proxyFlagOn,
- ),
- }
-
- fs := cfg.cf.flagSet
- fs.Usage = func() {
- fmt.Fprintln(os.Stderr, usageline)
- }
-
- fs.StringVar(&cfg.configFile, "config-file", "", "Path to the server configuration file")
-
- // member
- fs.Var(cfg.ec.CorsInfo, "cors", "Comma-separated white list of origins for CORS (cross-origin resource sharing).")
- fs.StringVar(&cfg.ec.Dir, "data-dir", cfg.ec.Dir, "Path to the data directory.")
- fs.StringVar(&cfg.ec.WalDir, "wal-dir", cfg.ec.WalDir, "Path to the dedicated wal directory.")
- fs.Var(flags.NewURLsValue(embed.DefaultListenPeerURLs), "listen-peer-urls", "List of URLs to listen on for peer traffic.")
- fs.Var(flags.NewURLsValue(embed.DefaultListenClientURLs), "listen-client-urls", "List of URLs to listen on for client traffic.")
- fs.StringVar(&cfg.ec.ListenMetricsUrlsJSON, "listen-metrics-urls", "", "List of URLs to listen on for metrics.")
- fs.UintVar(&cfg.ec.MaxSnapFiles, "max-snapshots", cfg.ec.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited).")
- fs.UintVar(&cfg.ec.MaxWalFiles, "max-wals", cfg.ec.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).")
- fs.StringVar(&cfg.ec.Name, "name", cfg.ec.Name, "Human-readable name for this member.")
- fs.Uint64Var(&cfg.ec.SnapCount, "snapshot-count", cfg.ec.SnapCount, "Number of committed transactions to trigger a snapshot to disk.")
- fs.UintVar(&cfg.ec.TickMs, "heartbeat-interval", cfg.ec.TickMs, "Time (in milliseconds) of a heartbeat interval.")
- fs.UintVar(&cfg.ec.ElectionMs, "election-timeout", cfg.ec.ElectionMs, "Time (in milliseconds) for an election to timeout.")
- fs.BoolVar(&cfg.ec.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.ec.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.")
- fs.Int64Var(&cfg.ec.QuotaBackendBytes, "quota-backend-bytes", cfg.ec.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 0 means use the default quota.")
- fs.UintVar(&cfg.ec.MaxTxnOps, "max-txn-ops", cfg.ec.MaxTxnOps, "Maximum number of operations permitted in a transaction.")
- fs.UintVar(&cfg.ec.MaxRequestBytes, "max-request-bytes", cfg.ec.MaxRequestBytes, "Maximum client request size in bytes the server will accept.")
- fs.DurationVar(&cfg.ec.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.ec.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.")
- fs.DurationVar(&cfg.ec.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.ec.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
- fs.DurationVar(&cfg.ec.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.ec.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
-
- // clustering
- fs.Var(flags.NewURLsValue(embed.DefaultInitialAdvertisePeerURLs), "initial-advertise-peer-urls", "List of this member's peer URLs to advertise to the rest of the cluster.")
- fs.Var(flags.NewURLsValue(embed.DefaultAdvertiseClientURLs), "advertise-client-urls", "List of this member's client URLs to advertise to the public.")
- fs.StringVar(&cfg.ec.Durl, "discovery", cfg.ec.Durl, "Discovery URL used to bootstrap the cluster.")
- fs.Var(cfg.cf.fallback, "discovery-fallback", fmt.Sprintf("Valid values include %s", strings.Join(cfg.cf.fallback.Values, ", ")))
-
- fs.StringVar(&cfg.ec.Dproxy, "discovery-proxy", cfg.ec.Dproxy, "HTTP proxy to use for traffic to discovery service.")
- fs.StringVar(&cfg.ec.DNSCluster, "discovery-srv", cfg.ec.DNSCluster, "DNS domain used to bootstrap initial cluster.")
- fs.StringVar(&cfg.ec.InitialCluster, "initial-cluster", cfg.ec.InitialCluster, "Initial cluster configuration for bootstrapping.")
- fs.StringVar(&cfg.ec.InitialClusterToken, "initial-cluster-token", cfg.ec.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
- fs.Var(cfg.cf.clusterState, "initial-cluster-state", "Initial cluster state ('new' or 'existing').")
-
- fs.BoolVar(&cfg.ec.StrictReconfigCheck, "strict-reconfig-check", cfg.ec.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
- fs.BoolVar(&cfg.ec.EnableV2, "enable-v2", cfg.ec.EnableV2, "Accept etcd V2 client requests.")
- fs.StringVar(&cfg.ec.ExperimentalEnableV2V3, "experimental-enable-v2v3", cfg.ec.ExperimentalEnableV2V3, "v3 prefix for serving emulated v2 state.")
-
- // proxy
- fs.Var(cfg.cf.proxy, "proxy", fmt.Sprintf("Valid values include %s", strings.Join(cfg.cf.proxy.Values, ", ")))
-
- fs.UintVar(&cfg.cp.ProxyFailureWaitMs, "proxy-failure-wait", cfg.cp.ProxyFailureWaitMs, "Time (in milliseconds) an endpoint will be held in a failed state.")
- fs.UintVar(&cfg.cp.ProxyRefreshIntervalMs, "proxy-refresh-interval", cfg.cp.ProxyRefreshIntervalMs, "Time (in milliseconds) of the endpoints refresh interval.")
- fs.UintVar(&cfg.cp.ProxyDialTimeoutMs, "proxy-dial-timeout", cfg.cp.ProxyDialTimeoutMs, "Time (in milliseconds) for a dial to timeout.")
- fs.UintVar(&cfg.cp.ProxyWriteTimeoutMs, "proxy-write-timeout", cfg.cp.ProxyWriteTimeoutMs, "Time (in milliseconds) for a write to timeout.")
- fs.UintVar(&cfg.cp.ProxyReadTimeoutMs, "proxy-read-timeout", cfg.cp.ProxyReadTimeoutMs, "Time (in milliseconds) for a read to timeout.")
-
- // security
- fs.StringVar(&cfg.ec.ClientTLSInfo.CAFile, "ca-file", "", "DEPRECATED: Path to the client server TLS CA file.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.")
- fs.BoolVar(&cfg.ec.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.")
- fs.StringVar(&cfg.ec.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.")
- fs.BoolVar(&cfg.ec.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates")
- fs.StringVar(&cfg.ec.PeerTLSInfo.CAFile, "peer-ca-file", "", "DEPRECATED: Path to the peer server TLS CA file.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.")
- fs.BoolVar(&cfg.ec.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.")
- fs.BoolVar(&cfg.ec.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates")
- fs.StringVar(&cfg.ec.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.")
- fs.StringVar(&cfg.ec.PeerTLSInfo.AllowedCN, "peer-cert-allowed-cn", "", "Allowed CN for inter peer authentication.")
-
- fs.Var(flags.NewStringsValueV2(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
-
- // logging
- fs.BoolVar(&cfg.ec.Debug, "debug", false, "Enable debug-level logging for etcd.")
- fs.StringVar(&cfg.ec.LogPkgLevels, "log-package-levels", "", "Specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').")
- fs.StringVar(&cfg.ec.LogOutput, "log-output", embed.DefaultLogOutput, "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.")
-
- // unsafe
- fs.BoolVar(&cfg.ec.ForceNewCluster, "force-new-cluster", false, "Force to create a new one member cluster.")
-
- // version
- fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.")
-
- fs.StringVar(&cfg.ec.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
- fs.StringVar(&cfg.ec.AutoCompactionMode, "auto-compaction-mode", "periodic", "interpret 'auto-compaction-retention' one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.")
-
- // pprof profiler via HTTP
- fs.BoolVar(&cfg.ec.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")
-
- // additional metrics
- fs.StringVar(&cfg.ec.Metrics, "metrics", cfg.ec.Metrics, "Set level of detail for exported metrics, specify 'extensive' to include histogram metrics")
-
- // auth
- fs.StringVar(&cfg.ec.AuthToken, "auth-token", cfg.ec.AuthToken, "Specify auth token specific options.")
-
- // experimental
- fs.BoolVar(&cfg.ec.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ec.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic.")
- fs.DurationVar(&cfg.ec.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ec.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes.")
-
- // ignored
- for _, f := range cfg.ignored {
- fs.Var(&flags.IgnoredFlag{Name: f}, f, "")
- }
- return cfg
-}
-
-func (cfg *config) parse(arguments []string) error {
- perr := cfg.cf.flagSet.Parse(arguments)
- switch perr {
- case nil:
- case flag.ErrHelp:
- fmt.Println(flagsline)
- os.Exit(0)
- default:
- os.Exit(2)
- }
- if len(cfg.cf.flagSet.Args()) != 0 {
- return fmt.Errorf("'%s' is not a valid flag", cfg.cf.flagSet.Arg(0))
- }
-
- if cfg.printVersion {
- fmt.Printf("etcd Version: %s\n", version.Version)
- fmt.Printf("Git SHA: %s\n", version.GitSHA)
- fmt.Printf("Go Version: %s\n", runtime.Version())
- fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
- os.Exit(0)
- }
-
- var err error
- if cfg.configFile != "" {
- plog.Infof("Loading server configuration from %q", cfg.configFile)
- err = cfg.configFromFile(cfg.configFile)
- } else {
- err = cfg.configFromCmdLine()
- }
- return err
-}
-
-func (cfg *config) configFromCmdLine() error {
- err := flags.SetFlagsFromEnv("ETCD", cfg.cf.flagSet)
- if err != nil {
- plog.Fatalf("%v", err)
- }
-
- cfg.ec.LPUrls = flags.URLsFromFlag(cfg.cf.flagSet, "listen-peer-urls")
- cfg.ec.APUrls = flags.URLsFromFlag(cfg.cf.flagSet, "initial-advertise-peer-urls")
- cfg.ec.LCUrls = flags.URLsFromFlag(cfg.cf.flagSet, "listen-client-urls")
- cfg.ec.ACUrls = flags.URLsFromFlag(cfg.cf.flagSet, "advertise-client-urls")
-
- if len(cfg.ec.ListenMetricsUrlsJSON) > 0 {
- u, err := types.NewURLs(strings.Split(cfg.ec.ListenMetricsUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
- }
- cfg.ec.ListenMetricsUrls = []url.URL(u)
- }
-
- cfg.ec.CipherSuites = flags.StringsFromFlagV2(cfg.cf.flagSet, "cipher-suites")
-
- cfg.ec.ClusterState = cfg.cf.clusterState.String()
- cfg.cp.Fallback = cfg.cf.fallback.String()
- cfg.cp.Proxy = cfg.cf.proxy.String()
-
- // disable default advertise-client-urls if lcurls is set
- missingAC := flags.IsSet(cfg.cf.flagSet, "listen-client-urls") && !flags.IsSet(cfg.cf.flagSet, "advertise-client-urls")
- if !cfg.mayBeProxy() && missingAC {
- cfg.ec.ACUrls = nil
- }
-
- // disable default initial-cluster if discovery is set
- if (cfg.ec.Durl != "" || cfg.ec.DNSCluster != "") && !flags.IsSet(cfg.cf.flagSet, "initial-cluster") {
- cfg.ec.InitialCluster = ""
- }
-
- return cfg.validate()
-}
-
-func (cfg *config) configFromFile(path string) error {
- eCfg, err := embed.ConfigFromFile(path)
- if err != nil {
- return err
- }
- cfg.ec = *eCfg
-
- // load extra config information
- b, rerr := ioutil.ReadFile(path)
- if rerr != nil {
- return rerr
- }
- if yerr := yaml.Unmarshal(b, &cfg.cp); yerr != nil {
- return yerr
- }
- if cfg.cp.FallbackJSON != "" {
- if err := cfg.cf.fallback.Set(cfg.cp.FallbackJSON); err != nil {
- plog.Panicf("unexpected error setting up discovery-fallback flag: %v", err)
- }
- cfg.cp.Fallback = cfg.cf.fallback.String()
- }
- if cfg.cp.ProxyJSON != "" {
- if err := cfg.cf.proxy.Set(cfg.cp.ProxyJSON); err != nil {
- plog.Panicf("unexpected error setting up proxyFlag: %v", err)
- }
- cfg.cp.Proxy = cfg.cf.proxy.String()
- }
- return nil
-}
-
-func (cfg *config) mayBeProxy() bool {
- mayFallbackToProxy := cfg.ec.Durl != "" && cfg.cp.Fallback == fallbackFlagProxy
- return cfg.cp.Proxy != proxyFlagOff || mayFallbackToProxy
-}
-
-func (cfg *config) validate() error {
- err := cfg.ec.Validate()
- // TODO(yichengq): check this for joining through discovery service case
- if err == embed.ErrUnsetAdvertiseClientURLsFlag && cfg.mayBeProxy() {
- return nil
- }
- return err
-}
-
-func (cfg config) isProxy() bool { return cfg.cf.proxy.String() != proxyFlagOff }
-func (cfg config) isReadonlyProxy() bool { return cfg.cf.proxy.String() == proxyFlagReadonly }
-func (cfg config) shouldFallbackToProxy() bool { return cfg.cf.fallback.String() == fallbackFlagProxy }
diff --git a/vendor/github.com/coreos/etcd/etcdmain/doc.go b/vendor/github.com/coreos/etcd/etcdmain/doc.go
deleted file mode 100644
index ff281aa..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package etcdmain contains the main entry point for the etcd binary.
-package etcdmain
diff --git a/vendor/github.com/coreos/etcd/etcdmain/etcd.go b/vendor/github.com/coreos/etcd/etcdmain/etcd.go
deleted file mode 100644
index 87e9b25..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/etcd.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdmain
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strings"
- "time"
-
- "github.com/coreos/etcd/discovery"
- "github.com/coreos/etcd/embed"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/pkg/cors"
- "github.com/coreos/etcd/pkg/fileutil"
- pkgioutil "github.com/coreos/etcd/pkg/ioutil"
- "github.com/coreos/etcd/pkg/osutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/proxy/httpproxy"
- "github.com/coreos/etcd/version"
-
- "github.com/coreos/pkg/capnslog"
- "google.golang.org/grpc"
-)
-
-type dirType string
-
-var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdmain")
-
-var (
- dirMember = dirType("member")
- dirProxy = dirType("proxy")
- dirEmpty = dirType("empty")
-)
-
-func startEtcdOrProxyV2() {
- grpc.EnableTracing = false
-
- cfg := newConfig()
- defaultInitialCluster := cfg.ec.InitialCluster
-
- err := cfg.parse(os.Args[1:])
- if err != nil {
- plog.Errorf("error verifying flags, %v. See 'etcd --help'.", err)
- switch err {
- case embed.ErrUnsetAdvertiseClientURLsFlag:
- plog.Errorf("When listening on specific address(es), this etcd process must advertise accessible url(s) to each connected client.")
- }
- os.Exit(1)
- }
- cfg.ec.SetupLogging()
-
- var stopped <-chan struct{}
- var errc <-chan error
-
- plog.Infof("etcd Version: %s\n", version.Version)
- plog.Infof("Git SHA: %s\n", version.GitSHA)
- plog.Infof("Go Version: %s\n", runtime.Version())
- plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
-
- GoMaxProcs := runtime.GOMAXPROCS(0)
- plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", GoMaxProcs, runtime.NumCPU())
-
- defaultHost, dhErr := (&cfg.ec).UpdateDefaultClusterFromName(defaultInitialCluster)
- if defaultHost != "" {
- plog.Infof("advertising using detected default host %q", defaultHost)
- }
- if dhErr != nil {
- plog.Noticef("failed to detect default host (%v)", dhErr)
- }
-
- if cfg.ec.Dir == "" {
- cfg.ec.Dir = fmt.Sprintf("%v.etcd", cfg.ec.Name)
- plog.Warningf("no data-dir provided, using default data-dir ./%s", cfg.ec.Dir)
- }
-
- which := identifyDataDirOrDie(cfg.ec.Dir)
- if which != dirEmpty {
- plog.Noticef("the server is already initialized as %v before, starting as etcd %v...", which, which)
- switch which {
- case dirMember:
- stopped, errc, err = startEtcd(&cfg.ec)
- case dirProxy:
- err = startProxy(cfg)
- default:
- plog.Panicf("unhandled dir type %v", which)
- }
- } else {
- shouldProxy := cfg.isProxy()
- if !shouldProxy {
- stopped, errc, err = startEtcd(&cfg.ec)
- if derr, ok := err.(*etcdserver.DiscoveryError); ok && derr.Err == discovery.ErrFullCluster {
- if cfg.shouldFallbackToProxy() {
- plog.Noticef("discovery cluster full, falling back to %s", fallbackFlagProxy)
- shouldProxy = true
- }
- }
- }
- if shouldProxy {
- err = startProxy(cfg)
- }
- }
-
- if err != nil {
- if derr, ok := err.(*etcdserver.DiscoveryError); ok {
- switch derr.Err {
- case discovery.ErrDuplicateID:
- plog.Errorf("member %q has previously registered with discovery service token (%s).", cfg.ec.Name, cfg.ec.Durl)
- plog.Errorf("But etcd could not find valid cluster configuration in the given data dir (%s).", cfg.ec.Dir)
- plog.Infof("Please check the given data dir path if the previous bootstrap succeeded")
- plog.Infof("or use a new discovery token if the previous bootstrap failed.")
- case discovery.ErrDuplicateName:
- plog.Errorf("member with duplicated name has registered with discovery service token(%s).", cfg.ec.Durl)
- plog.Errorf("please check (cURL) the discovery token for more information.")
- plog.Errorf("please do not reuse the discovery token and generate a new one to bootstrap the cluster.")
- default:
- plog.Errorf("%v", err)
- plog.Infof("discovery token %s was used, but failed to bootstrap the cluster.", cfg.ec.Durl)
- plog.Infof("please generate a new discovery token and try to bootstrap again.")
- }
- os.Exit(1)
- }
-
- if strings.Contains(err.Error(), "include") && strings.Contains(err.Error(), "--initial-cluster") {
- plog.Infof("%v", err)
- if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) {
- plog.Infof("forgot to set --initial-cluster flag?")
- }
- if types.URLs(cfg.ec.APUrls).String() == embed.DefaultInitialAdvertisePeerURLs {
- plog.Infof("forgot to set --initial-advertise-peer-urls flag?")
- }
- if cfg.ec.InitialCluster == cfg.ec.InitialClusterFromName(cfg.ec.Name) && len(cfg.ec.Durl) == 0 {
- plog.Infof("if you want to use discovery service, please set --discovery flag.")
- }
- os.Exit(1)
- }
- plog.Fatalf("%v", err)
- }
-
- osutil.HandleInterrupts()
-
- // At this point, the initialization of etcd is done.
- // The listeners are listening on the TCP ports and ready
- // for accepting connections. The etcd instance should be
- // joined with the cluster and ready to serve incoming
- // connections.
- notifySystemd()
-
- select {
- case lerr := <-errc:
- // fatal out on listener errors
- plog.Fatal(lerr)
- case <-stopped:
- }
-
- osutil.Exit(0)
-}
-
-// startEtcd runs StartEtcd in addition to hooks needed for standalone etcd.
-func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) {
- e, err := embed.StartEtcd(cfg)
- if err != nil {
- return nil, nil, err
- }
- osutil.RegisterInterruptHandler(e.Close)
- select {
- case <-e.Server.ReadyNotify(): // wait for e.Server to join the cluster
- case <-e.Server.StopNotify(): // publish aborted from 'ErrStopped'
- }
- return e.Server.StopNotify(), e.Err(), nil
-}
-
-// startProxy launches an HTTP proxy for client communication which proxies to other etcd nodes.
-func startProxy(cfg *config) error {
- plog.Notice("proxy: this proxy supports v2 API only!")
-
- clientTLSInfo := cfg.ec.ClientTLSInfo
- if clientTLSInfo.Empty() {
- // Support old proxy behavior of defaulting to PeerTLSInfo
- // for both client and peer connections.
- clientTLSInfo = cfg.ec.PeerTLSInfo
- }
- clientTLSInfo.InsecureSkipVerify = cfg.ec.ClientAutoTLS
- cfg.ec.PeerTLSInfo.InsecureSkipVerify = cfg.ec.PeerAutoTLS
-
- pt, err := transport.NewTimeoutTransport(clientTLSInfo, time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond)
- if err != nil {
- return err
- }
- pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost
-
- if err = cfg.ec.PeerSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- tr, err := transport.NewTimeoutTransport(cfg.ec.PeerTLSInfo, time.Duration(cfg.cp.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.cp.ProxyWriteTimeoutMs)*time.Millisecond)
- if err != nil {
- return err
- }
-
- cfg.ec.Dir = filepath.Join(cfg.ec.Dir, "proxy")
- err = os.MkdirAll(cfg.ec.Dir, fileutil.PrivateDirMode)
- if err != nil {
- return err
- }
-
- var peerURLs []string
- clusterfile := filepath.Join(cfg.ec.Dir, "cluster")
-
- b, err := ioutil.ReadFile(clusterfile)
- switch {
- case err == nil:
- if cfg.ec.Durl != "" {
- plog.Warningf("discovery token ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
- }
- if cfg.ec.DNSCluster != "" {
- plog.Warningf("DNS SRV discovery ignored since the proxy has already been initialized. Valid cluster file found at %q", clusterfile)
- }
- urls := struct{ PeerURLs []string }{}
- err = json.Unmarshal(b, &urls)
- if err != nil {
- return err
- }
- peerURLs = urls.PeerURLs
- plog.Infof("proxy: using peer urls %v from cluster file %q", peerURLs, clusterfile)
- case os.IsNotExist(err):
- var urlsmap types.URLsMap
- urlsmap, _, err = cfg.ec.PeerURLsMapAndToken("proxy")
- if err != nil {
- return fmt.Errorf("error setting up initial cluster: %v", err)
- }
-
- if cfg.ec.Durl != "" {
- var s string
- s, err = discovery.GetCluster(cfg.ec.Durl, cfg.ec.Dproxy)
- if err != nil {
- return err
- }
- if urlsmap, err = types.NewURLsMap(s); err != nil {
- return err
- }
- }
- peerURLs = urlsmap.URLs()
- plog.Infof("proxy: using peer urls %v ", peerURLs)
- default:
- return err
- }
-
- clientURLs := []string{}
- uf := func() []string {
- gcls, gerr := etcdserver.GetClusterFromRemotePeers(peerURLs, tr)
-
- if gerr != nil {
- plog.Warningf("proxy: %v", gerr)
- return []string{}
- }
-
- clientURLs = gcls.ClientURLs()
-
- urls := struct{ PeerURLs []string }{gcls.PeerURLs()}
- b, jerr := json.Marshal(urls)
- if jerr != nil {
- plog.Warningf("proxy: error on marshal peer urls %s", jerr)
- return clientURLs
- }
-
- err = pkgioutil.WriteAndSyncFile(clusterfile+".bak", b, 0600)
- if err != nil {
- plog.Warningf("proxy: error on writing urls %s", err)
- return clientURLs
- }
- err = os.Rename(clusterfile+".bak", clusterfile)
- if err != nil {
- plog.Warningf("proxy: error on updating clusterfile %s", err)
- return clientURLs
- }
- if !reflect.DeepEqual(gcls.PeerURLs(), peerURLs) {
- plog.Noticef("proxy: updated peer urls in cluster file from %v to %v", peerURLs, gcls.PeerURLs())
- }
- peerURLs = gcls.PeerURLs()
-
- return clientURLs
- }
- ph := httpproxy.NewHandler(pt, uf, time.Duration(cfg.cp.ProxyFailureWaitMs)*time.Millisecond, time.Duration(cfg.cp.ProxyRefreshIntervalMs)*time.Millisecond)
- ph = &cors.CORSHandler{
- Handler: ph,
- Info: cfg.ec.CorsInfo,
- }
-
- if cfg.isReadonlyProxy() {
- ph = httpproxy.NewReadonlyHandler(ph)
- }
-
- // setup self signed certs when serving https
- cHosts, cTLS := []string{}, false
- for _, u := range cfg.ec.LCUrls {
- cHosts = append(cHosts, u.Host)
- cTLS = cTLS || u.Scheme == "https"
- }
- for _, u := range cfg.ec.ACUrls {
- cHosts = append(cHosts, u.Host)
- cTLS = cTLS || u.Scheme == "https"
- }
- listenerTLS := cfg.ec.ClientTLSInfo
- if cfg.ec.ClientAutoTLS && cTLS {
- listenerTLS, err = transport.SelfCert(filepath.Join(cfg.ec.Dir, "clientCerts"), cHosts)
- if err != nil {
- plog.Fatalf("proxy: could not initialize self-signed client certs (%v)", err)
- }
- }
-
- // Start a proxy server goroutine for each listen address
- for _, u := range cfg.ec.LCUrls {
- l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS)
- if err != nil {
- return err
- }
-
- host := u.String()
- go func() {
- plog.Info("proxy: listening for client requests on ", host)
- mux := http.NewServeMux()
- etcdhttp.HandlePrometheus(mux) // v2 proxy just uses the same port
- mux.Handle("/", ph)
- plog.Fatal(http.Serve(l, mux))
- }()
- }
- return nil
-}
-
-// identifyDataDirOrDie returns the type of the data dir.
-// Dies if the datadir is invalid.
-func identifyDataDirOrDie(dir string) dirType {
- names, err := fileutil.ReadDir(dir)
- if err != nil {
- if os.IsNotExist(err) {
- return dirEmpty
- }
- plog.Fatalf("error listing data dir: %s", dir)
- }
-
- var m, p bool
- for _, name := range names {
- switch dirType(name) {
- case dirMember:
- m = true
- case dirProxy:
- p = true
- default:
- plog.Warningf("found invalid file/dir %s under data dir %s (Ignore this if you are upgrading etcd)", name, dir)
- }
- }
-
- if m && p {
- plog.Fatal("invalid datadir. Both member and proxy directories exist.")
- }
- if m {
- return dirMember
- }
- if p {
- return dirProxy
- }
- return dirEmpty
-}
-
-func checkSupportArch() {
- // TODO qualify arm64
- if runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64le" {
- return
- }
- // unsupported arch only configured via environment variable
- // so unset here to not parse through flag
- defer os.Unsetenv("ETCD_UNSUPPORTED_ARCH")
- if env, ok := os.LookupEnv("ETCD_UNSUPPORTED_ARCH"); ok && env == runtime.GOARCH {
- plog.Warningf("running etcd on unsupported architecture %q since ETCD_UNSUPPORTED_ARCH is set", env)
- return
- }
- plog.Errorf("etcd on unsupported platform without ETCD_UNSUPPORTED_ARCH=%s set.", runtime.GOARCH)
- os.Exit(1)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdmain/gateway.go b/vendor/github.com/coreos/etcd/etcdmain/gateway.go
deleted file mode 100644
index 5487414..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/gateway.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdmain
-
-import (
- "fmt"
- "net"
- "net/url"
- "os"
- "time"
-
- "github.com/coreos/etcd/proxy/tcpproxy"
-
- "github.com/spf13/cobra"
-)
-
-var (
- gatewayListenAddr string
- gatewayEndpoints []string
- gatewayDNSCluster string
- gatewayInsecureDiscovery bool
- gatewayRetryDelay time.Duration
- gatewayCA string
-)
-
-var (
- rootCmd = &cobra.Command{
- Use: "etcd",
- Short: "etcd server",
- SuggestFor: []string{"etcd"},
- }
-)
-
-func init() {
- rootCmd.AddCommand(newGatewayCommand())
-}
-
-// newGatewayCommand returns the cobra command for "gateway".
-func newGatewayCommand() *cobra.Command {
- lpc := &cobra.Command{
- Use: "gateway <subcommand>",
- Short: "gateway related command",
- }
- lpc.AddCommand(newGatewayStartCommand())
-
- return lpc
-}
-
-func newGatewayStartCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "start",
- Short: "start the gateway",
- Run: startGateway,
- }
-
- cmd.Flags().StringVar(&gatewayListenAddr, "listen-addr", "127.0.0.1:23790", "listen address")
- cmd.Flags().StringVar(&gatewayDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
- cmd.Flags().BoolVar(&gatewayInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
- cmd.Flags().StringVar(&gatewayCA, "trusted-ca-file", "", "path to the client server TLS CA file.")
-
- cmd.Flags().StringSliceVar(&gatewayEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
-
- cmd.Flags().DurationVar(&gatewayRetryDelay, "retry-delay", time.Minute, "duration of delay before retrying failed endpoints")
-
- return &cmd
-}
-
-func stripSchema(eps []string) []string {
- var endpoints []string
-
- for _, ep := range eps {
-
- if u, err := url.Parse(ep); err == nil && u.Host != "" {
- ep = u.Host
- }
-
- endpoints = append(endpoints, ep)
- }
-
- return endpoints
-}
-
-func startGateway(cmd *cobra.Command, args []string) {
- srvs := discoverEndpoints(gatewayDNSCluster, gatewayCA, gatewayInsecureDiscovery)
- if len(srvs.Endpoints) == 0 {
- // no endpoints discovered, fall back to provided endpoints
- srvs.Endpoints = gatewayEndpoints
- }
- // Strip the schema from the endpoints because we start just a TCP proxy
- srvs.Endpoints = stripSchema(srvs.Endpoints)
- if len(srvs.SRVs) == 0 {
- for _, ep := range srvs.Endpoints {
- h, p, err := net.SplitHostPort(ep)
- if err != nil {
- plog.Fatalf("error parsing endpoint %q", ep)
- }
- var port uint16
- fmt.Sscanf(p, "%d", &port)
- srvs.SRVs = append(srvs.SRVs, &net.SRV{Target: h, Port: port})
- }
- }
-
- if len(srvs.Endpoints) == 0 {
- plog.Fatalf("no endpoints found")
- }
-
- l, err := net.Listen("tcp", gatewayListenAddr)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-
- tp := tcpproxy.TCPProxy{
- Listener: l,
- Endpoints: srvs.SRVs,
- MonitorInterval: gatewayRetryDelay,
- }
-
- // At this point, etcd gateway listener is initialized
- notifySystemd()
-
- tp.Run()
-}
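
The deleted `gateway.go` above wraps `tcpproxy.TCPProxy` in a cobra `gateway start` subcommand: endpoints come either from DNS SRV discovery or from `--endpoints`, URL schemes are stripped, and the proxy then blindly forwards TCP connections. A minimal sketch of that pass-through idea, using only the standard library and a single hard-coded backend (both addresses below are illustrative, not the gateway's real wiring):

```go
package main

import (
	"io"
	"log"
	"net"
)

// forward copies bytes in both directions between one client connection and
// one backend, which is all a pass-through TCP gateway fundamentally does.
func forward(client net.Conn, backend string) {
	defer client.Close()
	server, err := net.Dial("tcp", backend)
	if err != nil {
		log.Printf("dial %s: %v", backend, err)
		return
	}
	defer server.Close()
	go io.Copy(server, client) // client -> backend
	io.Copy(client, server)    // backend -> client
}

func main() {
	listenAddr := "127.0.0.1:23790" // same default listen address as the deleted gateway
	backend := "127.0.0.1:2379"     // one etcd endpoint; the real gateway balances over several
	l, err := net.Listen("tcp", listenAddr)
	if err != nil {
		log.Fatal(err)
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go forward(conn, backend)
	}
}
```

The real implementation also monitors and retries failed endpoints on the `--retry-delay` interval, which this sketch omits.
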
diff --git a/vendor/github.com/coreos/etcd/etcdmain/grpc_proxy.go b/vendor/github.com/coreos/etcd/etcdmain/grpc_proxy.go
deleted file mode 100644
index 6a8e39f..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/grpc_proxy.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdmain
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "math"
- "net"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/leasing"
- "github.com/coreos/etcd/clientv3/namespace"
- "github.com/coreos/etcd/clientv3/ordering"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/debugutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/proxy/grpcproxy"
-
- "github.com/coreos/pkg/capnslog"
- grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
- "github.com/soheilhy/cmux"
- "github.com/spf13/cobra"
- "google.golang.org/grpc"
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- grpcProxyListenAddr string
- grpcProxyMetricsListenAddr string
- grpcProxyEndpoints []string
- grpcProxyDNSCluster string
- grpcProxyInsecureDiscovery bool
- grpcProxyDataDir string
- grpcMaxCallSendMsgSize int
- grpcMaxCallRecvMsgSize int
-
- // tls for connecting to etcd
-
- grpcProxyCA string
- grpcProxyCert string
- grpcProxyKey string
- grpcProxyInsecureSkipTLSVerify bool
-
- // tls for clients connecting to proxy
-
- grpcProxyListenCA string
- grpcProxyListenCert string
- grpcProxyListenKey string
- grpcProxyListenAutoTLS bool
- grpcProxyListenCRL string
-
- grpcProxyAdvertiseClientURL string
- grpcProxyResolverPrefix string
- grpcProxyResolverTTL int
-
- grpcProxyNamespace string
- grpcProxyLeasing string
-
- grpcProxyEnablePprof bool
- grpcProxyEnableOrdering bool
-
- grpcProxyDebug bool
-)
-
-const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024
-
-func init() {
- rootCmd.AddCommand(newGRPCProxyCommand())
-}
-
-// newGRPCProxyCommand returns the cobra command for "grpc-proxy".
-func newGRPCProxyCommand() *cobra.Command {
- lpc := &cobra.Command{
- Use: "grpc-proxy <subcommand>",
- Short: "grpc-proxy related command",
- }
- lpc.AddCommand(newGRPCProxyStartCommand())
-
- return lpc
-}
-
-func newGRPCProxyStartCommand() *cobra.Command {
- cmd := cobra.Command{
- Use: "start",
- Short: "start the grpc proxy",
- Run: startGRPCProxy,
- }
-
- cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address")
- cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster")
- cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for /metrics requests on an additional interface")
- cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records")
- cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints")
- cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)")
- cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)")
- cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints")
- cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests")
- cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"`)
- cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data")
- cmd.Flags().IntVar(&grpcMaxCallSendMsgSize, "max-send-bytes", defaultGRPCMaxCallSendMsgSize, "message send limits in bytes (default value is 1.5 MiB)")
- cmd.Flags().IntVar(&grpcMaxCallRecvMsgSize, "max-recv-bytes", math.MaxInt32, "message receive limits in bytes (default value is math.MaxInt32)")
-
- // client TLS for connecting to server
- cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file")
- cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file")
- cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle")
- cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates")
-
- // client TLS for connecting to proxy
- cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file")
- cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file")
- cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle")
- cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates")
- cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.")
-
- // experimental flags
- cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads have monotonically increasing store revisions across endpoints.")
- cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.")
-
- cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.")
-
- return &cmd
-}
-
-func startGRPCProxy(cmd *cobra.Command, args []string) {
- checkArgs()
-
- capnslog.SetGlobalLogLevel(capnslog.INFO)
- if grpcProxyDebug {
- capnslog.SetGlobalLogLevel(capnslog.DEBUG)
- grpc.EnableTracing = true
- // enable info, warning, error
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
- } else {
- // only discard info
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
- }
-
- tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
- if tlsinfo == nil && grpcProxyListenAutoTLS {
- host := []string{"https://" + grpcProxyListenAddr}
- dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
- autoTLS, err := transport.SelfCert(dir, host)
- if err != nil {
- plog.Fatal(err)
- }
- tlsinfo = &autoTLS
- }
- if tlsinfo != nil {
- plog.Infof("ServerTLS: %s", tlsinfo)
- }
- m := mustListenCMux(tlsinfo)
-
- grpcl := m.Match(cmux.HTTP2())
- defer func() {
- grpcl.Close()
- plog.Infof("stopping listening for grpc-proxy client requests on %s", grpcProxyListenAddr)
- }()
-
- client := mustNewClient()
-
- srvhttp, httpl := mustHTTPListener(m, tlsinfo, client)
- errc := make(chan error)
- go func() { errc <- newGRPCProxyServer(client).Serve(grpcl) }()
- go func() { errc <- srvhttp.Serve(httpl) }()
- go func() { errc <- m.Serve() }()
- if len(grpcProxyMetricsListenAddr) > 0 {
- mhttpl := mustMetricsListener(tlsinfo)
- go func() {
- mux := http.NewServeMux()
- etcdhttp.HandlePrometheus(mux)
- grpcproxy.HandleHealth(mux, client)
- plog.Fatal(http.Serve(mhttpl, mux))
- }()
- }
-
- // grpc-proxy is initialized, ready to serve
- notifySystemd()
-
- fmt.Fprintln(os.Stderr, <-errc)
- os.Exit(1)
-}
-
-func checkArgs() {
- if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL < 1 {
- fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL))
- os.Exit(1)
- }
- if grpcProxyResolverPrefix == "" && grpcProxyResolverTTL > 0 {
- fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-prefix %q", grpcProxyResolverPrefix))
- os.Exit(1)
- }
- if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL > 0 && grpcProxyAdvertiseClientURL == "" {
- fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL))
- os.Exit(1)
- }
-}
-
-func mustNewClient() *clientv3.Client {
- srvs := discoverEndpoints(grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery)
- eps := srvs.Endpoints
- if len(eps) == 0 {
- eps = grpcProxyEndpoints
- }
- cfg, err := newClientCfg(eps)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- cfg.DialOptions = append(cfg.DialOptions,
- grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor))
- cfg.DialOptions = append(cfg.DialOptions,
- grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor))
- client, err := clientv3.New(*cfg)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- return client
-}
-
-func newClientCfg(eps []string) (*clientv3.Config, error) {
- // set tls if any one tls option set
- cfg := clientv3.Config{
- Endpoints: eps,
- DialTimeout: 5 * time.Second,
- }
-
- if grpcMaxCallSendMsgSize > 0 {
- cfg.MaxCallSendMsgSize = grpcMaxCallSendMsgSize
- }
- if grpcMaxCallRecvMsgSize > 0 {
- cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize
- }
-
- tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey)
- if tls == nil && grpcProxyInsecureSkipTLSVerify {
- tls = &transport.TLSInfo{}
- }
- if tls != nil {
- clientTLS, err := tls.ClientConfig()
- if err != nil {
- return nil, err
- }
- clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify
- cfg.TLS = clientTLS
- plog.Infof("ClientTLS: %s", tls)
- }
- return &cfg, nil
-}
-
-func newTLS(ca, cert, key string) *transport.TLSInfo {
- if ca == "" && cert == "" && key == "" {
- return nil
- }
- return &transport.TLSInfo{CAFile: ca, CertFile: cert, KeyFile: key}
-}
-
-func mustListenCMux(tlsinfo *transport.TLSInfo) cmux.CMux {
- l, err := net.Listen("tcp", grpcProxyListenAddr)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-
- if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- if tlsinfo != nil {
- tlsinfo.CRLFile = grpcProxyListenCRL
- if l, err = transport.NewTLSListener(l, tlsinfo); err != nil {
- plog.Fatal(err)
- }
- }
-
- plog.Infof("listening for grpc-proxy client requests on %s", grpcProxyListenAddr)
- return cmux.New(l)
-}
-
-func newGRPCProxyServer(client *clientv3.Client) *grpc.Server {
- if grpcProxyEnableOrdering {
- vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
- client.KV = ordering.NewKV(client.KV, vf)
- plog.Infof("waiting for linearized read from cluster to recover ordering")
- for {
- _, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly())
- if err == nil {
- break
- }
- plog.Warningf("ordering recovery failed, retrying in 1s (%v)", err)
- time.Sleep(time.Second)
- }
- }
-
- if len(grpcProxyNamespace) > 0 {
- client.KV = namespace.NewKV(client.KV, grpcProxyNamespace)
- client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace)
- client.Lease = namespace.NewLease(client.Lease, grpcProxyNamespace)
- }
-
- if len(grpcProxyLeasing) > 0 {
- client.KV, _, _ = leasing.NewKV(client, grpcProxyLeasing)
- }
-
- kvp, _ := grpcproxy.NewKvProxy(client)
- watchp, _ := grpcproxy.NewWatchProxy(client)
- if grpcProxyResolverPrefix != "" {
- grpcproxy.Register(client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL)
- }
- clusterp, _ := grpcproxy.NewClusterProxy(client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix)
- leasep, _ := grpcproxy.NewLeaseProxy(client)
- mainp := grpcproxy.NewMaintenanceProxy(client)
- authp := grpcproxy.NewAuthProxy(client)
- electionp := grpcproxy.NewElectionProxy(client)
- lockp := grpcproxy.NewLockProxy(client)
-
- server := grpc.NewServer(
- grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
- grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
- grpc.MaxConcurrentStreams(math.MaxUint32),
- )
-
- pb.RegisterKVServer(server, kvp)
- pb.RegisterWatchServer(server, watchp)
- pb.RegisterClusterServer(server, clusterp)
- pb.RegisterLeaseServer(server, leasep)
- pb.RegisterMaintenanceServer(server, mainp)
- pb.RegisterAuthServer(server, authp)
- v3electionpb.RegisterElectionServer(server, electionp)
- v3lockpb.RegisterLockServer(server, lockp)
-
- // set zero values for metrics registered for this grpc server
- grpc_prometheus.Register(server)
-
- return server
-}
-
-func mustHTTPListener(m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client) (*http.Server, net.Listener) {
- httpmux := http.NewServeMux()
- httpmux.HandleFunc("/", http.NotFound)
- etcdhttp.HandlePrometheus(httpmux)
- grpcproxy.HandleHealth(httpmux, c)
- if grpcProxyEnablePprof {
- for p, h := range debugutil.PProfHandlers() {
- httpmux.Handle(p, h)
- }
- plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
- }
- srvhttp := &http.Server{Handler: httpmux}
-
- if tlsinfo == nil {
- return srvhttp, m.Match(cmux.HTTP1())
- }
-
- srvTLS, err := tlsinfo.ServerConfig()
- if err != nil {
- plog.Fatalf("could not setup TLS (%v)", err)
- }
- srvhttp.TLSConfig = srvTLS
- return srvhttp, m.Match(cmux.Any())
-}
-
-func mustMetricsListener(tlsinfo *transport.TLSInfo) net.Listener {
- murl, err := url.Parse(grpcProxyMetricsListenAddr)
- if err != nil {
- fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr)
- os.Exit(1)
- }
- ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- plog.Info("grpc-proxy: listening for metrics on ", murl.String())
- return ml
-}
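
Most of the deleted `grpc_proxy.go` is flag plumbing and TLS setup, but the listener layout is the recognizable part: a single TCP listener is split with `cmux` so that gRPC (HTTP/2) and plain HTTP (metrics, health, pprof) are served on the same port. A stripped-down sketch of just that pattern, with the address and handler registration left as placeholders:

```go
package main

import (
	"log"
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
	"google.golang.org/grpc"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:23790") // illustrative address
	if err != nil {
		log.Fatal(err)
	}

	m := cmux.New(l)
	grpcL := m.Match(cmux.HTTP2()) // HTTP/2 connections go to the gRPC server
	httpL := m.Match(cmux.HTTP1()) // everything else is served as plain HTTP

	grpcSrv := grpc.NewServer() // a real proxy registers KV/Watch/Lease/... services here
	httpMux := http.NewServeMux()
	httpMux.HandleFunc("/", http.NotFound) // metrics and health handlers would hang off this mux
	httpSrv := &http.Server{Handler: httpMux}

	errc := make(chan error, 3)
	go func() { errc <- grpcSrv.Serve(grpcL) }()
	go func() { errc <- httpSrv.Serve(httpL) }()
	go func() { errc <- m.Serve() }()
	log.Fatal(<-errc)
}
```
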
diff --git a/vendor/github.com/coreos/etcd/etcdmain/help.go b/vendor/github.com/coreos/etcd/etcdmain/help.go
deleted file mode 100644
index c64dab3..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/help.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdmain
-
-import (
- "strconv"
-
- "github.com/coreos/etcd/embed"
-)
-
-var (
- usageline = `usage: etcd [flags]
- start an etcd server
-
- etcd --version
- show the version of etcd
-
- etcd -h | --help
- show the help information about etcd
-
- etcd --config-file
- path to the server configuration file
-
- etcd gateway
- run the stateless pass-through etcd TCP connection forwarding proxy
-
- etcd grpc-proxy
- run the stateless etcd v3 gRPC L7 reverse proxy
- `
- flagsline = `
-member flags:
-
- --name 'default'
- human-readable name for this member.
- --data-dir '${name}.etcd'
- path to the data directory.
- --wal-dir ''
- path to the dedicated wal directory.
- --snapshot-count '100000'
- number of committed transactions to trigger a snapshot to disk.
- --heartbeat-interval '100'
- time (in milliseconds) of a heartbeat interval.
- --election-timeout '1000'
- time (in milliseconds) for an election to timeout. See tuning documentation for details.
- --initial-election-tick-advance 'true'
- whether to fast-forward initial election ticks on boot for faster election.
- --listen-peer-urls 'http://localhost:2380'
- list of URLs to listen on for peer traffic.
- --listen-client-urls 'http://localhost:2379'
- list of URLs to listen on for client traffic.
- --max-snapshots '` + strconv.Itoa(embed.DefaultMaxSnapshots) + `'
- maximum number of snapshot files to retain (0 is unlimited).
- --max-wals '` + strconv.Itoa(embed.DefaultMaxWALs) + `'
- maximum number of wal files to retain (0 is unlimited).
- --cors ''
- comma-separated whitelist of origins for CORS (cross-origin resource sharing).
- --quota-backend-bytes '0'
- raise alarms when backend size exceeds the given quota (0 defaults to low space quota).
- --max-txn-ops '128'
- maximum number of operations permitted in a transaction.
- --max-request-bytes '1572864'
- maximum client request size in bytes the server will accept.
- --grpc-keepalive-min-time '5s'
- minimum duration interval that a client should wait before pinging server.
- --grpc-keepalive-interval '2h'
- frequency duration of server-to-client ping to check if a connection is alive (0 to disable).
- --grpc-keepalive-timeout '20s'
- additional duration of wait before closing a non-responsive connection (0 to disable).
-
-clustering flags:
-
- --initial-advertise-peer-urls 'http://localhost:2380'
- list of this member's peer URLs to advertise to the rest of the cluster.
- --initial-cluster 'default=http://localhost:2380'
- initial cluster configuration for bootstrapping.
- --initial-cluster-state 'new'
- initial cluster state ('new' or 'existing').
- --initial-cluster-token 'etcd-cluster'
- initial cluster token for the etcd cluster during bootstrap.
- Specifying this can protect you from unintended cross-cluster interaction when running multiple clusters.
- --advertise-client-urls 'http://localhost:2379'
- list of this member's client URLs to advertise to the public.
- The client URLs advertised should be accessible to machines that talk to etcd cluster. etcd client libraries parse these URLs to connect to the cluster.
- --discovery ''
- discovery URL used to bootstrap the cluster.
- --discovery-fallback 'proxy'
- expected behavior ('exit' or 'proxy') when the discovery service fails.
- "proxy" supports v2 API only.
- --discovery-proxy ''
- HTTP proxy to use for traffic to discovery service.
- --discovery-srv ''
- dns srv domain used to bootstrap the cluster.
- --strict-reconfig-check '` + strconv.FormatBool(embed.DefaultStrictReconfigCheck) + `'
- reject reconfiguration requests that would cause quorum loss.
- --auto-compaction-retention '0'
- auto compaction retention length. 0 means disable auto compaction.
- --auto-compaction-mode 'periodic'
- interpret 'auto-compaction-retention' as one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.
- --enable-v2 '` + strconv.FormatBool(embed.DefaultEnableV2) + `'
- Accept etcd V2 client requests.
-
-proxy flags:
- "proxy" supports v2 API only.
-
- --proxy 'off'
- proxy mode setting ('off', 'readonly' or 'on').
- --proxy-failure-wait 5000
- time (in milliseconds) an endpoint will be held in a failed state.
- --proxy-refresh-interval 30000
- time (in milliseconds) of the endpoints refresh interval.
- --proxy-dial-timeout 1000
- time (in milliseconds) for a dial to timeout.
- --proxy-write-timeout 5000
- time (in milliseconds) for a write to timeout.
- --proxy-read-timeout 0
- time (in milliseconds) for a read to timeout.
-
-
-security flags:
-
- --ca-file '' [DEPRECATED]
- path to the client server TLS CA file. '-ca-file ca.crt' could be replaced by '-trusted-ca-file ca.crt -client-cert-auth' and etcd will perform the same.
- --cert-file ''
- path to the client server TLS cert file.
- --key-file ''
- path to the client server TLS key file.
- --client-cert-auth 'false'
- enable client cert authentication.
- --client-crl-file ''
- path to the client certificate revocation list file.
- --trusted-ca-file ''
- path to the client server TLS trusted CA cert file.
- --auto-tls 'false'
- client TLS using generated certificates.
- --peer-ca-file '' [DEPRECATED]
- path to the peer server TLS CA file. '-peer-ca-file ca.crt' could be replaced by '-peer-trusted-ca-file ca.crt -peer-client-cert-auth' and etcd will perform the same.
- --peer-cert-file ''
- path to the peer server TLS cert file.
- --peer-key-file ''
- path to the peer server TLS key file.
- --peer-client-cert-auth 'false'
- enable peer client cert authentication.
- --peer-trusted-ca-file ''
- path to the peer server TLS trusted CA file.
- --peer-cert-allowed-cn ''
- Required CN for client certs connecting to the peer endpoint.
- --peer-auto-tls 'false'
- peer TLS using self-generated certificates if --peer-key-file and --peer-cert-file are not provided.
- --peer-crl-file ''
- path to the peer certificate revocation list file.
- --cipher-suites ''
- comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).
-
-logging flags:
-
- --debug 'false'
- enable debug-level logging for etcd.
- --log-package-levels ''
- specify a particular log level for each etcd package (eg: 'etcdmain=CRITICAL,etcdserver=DEBUG').
- --log-output 'default'
- specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
-
-unsafe flags:
-
-Please be CAUTIOUS when using unsafe flags because they will break the guarantees
-given by the consensus protocol.
-
- --force-new-cluster 'false'
- force to create a new one-member cluster.
-
-profiling flags:
- --enable-pprof 'false'
- Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"
- --metrics 'basic'
- Set level of detail for exported metrics, specify 'extensive' to include histogram metrics.
- --listen-metrics-urls ''
- List of URLs to listen on for metrics.
-
-auth flags:
- --auth-token 'simple'
- Specify a v3 authentication token type and its options ('simple' or 'jwt').
-
-experimental flags:
- --experimental-initial-corrupt-check 'false'
- enable to check data corruption before serving any client/peer traffic.
- --experimental-corrupt-check-time '0s'
- duration of time between cluster corruption check passes.
- --experimental-enable-v2v3 ''
- serve v2 requests through the v3 backend under a given prefix.
-`
-)
diff --git a/vendor/github.com/coreos/etcd/etcdmain/main.go b/vendor/github.com/coreos/etcd/etcdmain/main.go
deleted file mode 100644
index 06bbae5..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/main.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdmain
-
-import (
- "fmt"
- "os"
- "strings"
-
- "github.com/coreos/go-systemd/daemon"
- systemdutil "github.com/coreos/go-systemd/util"
-)
-
-func Main() {
- checkSupportArch()
-
- if len(os.Args) > 1 {
- cmd := os.Args[1]
- if covArgs := os.Getenv("ETCDCOV_ARGS"); len(covArgs) > 0 {
- args := strings.Split(os.Getenv("ETCDCOV_ARGS"), "\xe7\xcd")[1:]
- rootCmd.SetArgs(args)
- cmd = "grpc-proxy"
- }
- switch cmd {
- case "gateway", "grpc-proxy":
- if err := rootCmd.Execute(); err != nil {
- fmt.Fprint(os.Stderr, err)
- os.Exit(1)
- }
- return
- }
- }
-
- startEtcdOrProxyV2()
-}
-
-func notifySystemd() {
- if !systemdutil.IsRunningSystemd() {
- return
- }
- sent, err := daemon.SdNotify(false, "READY=1")
- if err != nil {
- plog.Errorf("failed to notify systemd for readiness: %v", err)
- }
- if !sent {
- plog.Errorf("forgot to set Type=notify in systemd service file?")
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdmain/util.go b/vendor/github.com/coreos/etcd/etcdmain/util.go
deleted file mode 100644
index 9657271..0000000
--- a/vendor/github.com/coreos/etcd/etcdmain/util.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdmain
-
-import (
- "fmt"
- "os"
-
- "github.com/coreos/etcd/pkg/srv"
- "github.com/coreos/etcd/pkg/transport"
-)
-
-func discoverEndpoints(dns string, ca string, insecure bool) (s srv.SRVClients) {
- if dns == "" {
- return s
- }
- srvs, err := srv.GetClient("etcd-client", dns)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
- endpoints := srvs.Endpoints
- plog.Infof("discovered the cluster %s from %s", endpoints, dns)
- if insecure {
- return *srvs
- }
- // confirm TLS connections are good
- tlsInfo := transport.TLSInfo{
- TrustedCAFile: ca,
- ServerName: dns,
- }
- plog.Infof("validating discovered endpoints %v", endpoints)
- endpoints, err = transport.ValidateSecureEndpoints(tlsInfo, endpoints)
- if err != nil {
- plog.Warningf("%v", err)
- }
- plog.Infof("using discovered endpoints %v", endpoints)
-
- // map endpoints back to SRVClients struct with SRV data
- eps := make(map[string]struct{})
- for _, ep := range endpoints {
- eps[ep] = struct{}{}
- }
- for i := range srvs.Endpoints {
- if _, ok := eps[srvs.Endpoints[i]]; !ok {
- continue
- }
- s.Endpoints = append(s.Endpoints, srvs.Endpoints[i])
- s.SRVs = append(s.SRVs, srvs.SRVs[i])
- }
-
- return s
-}
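
The deleted `util.go` holds the shared discovery helper: it asks `pkg/srv` for `etcd-client` SRV records under the given domain, then validates the discovered endpoints over TLS unless insecure discovery was requested. As a rough standard-library analogue of only the lookup step (the domain is a placeholder, and no TLS validation is attempted):

```go
package main

import (
	"fmt"
	"log"
	"net"
)

func main() {
	domain := "example.com" // illustrative discovery domain

	// net.LookupSRV("etcd-client", "tcp", domain) queries
	// _etcd-client._tcp.example.com, the record name the non-TLS
	// etcd discovery path resolves.
	_, records, err := net.LookupSRV("etcd-client", "tcp", domain)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range records {
		fmt.Printf("discovered endpoint: %s:%d\n", r.Target, r.Port)
	}
}
```
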
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go
deleted file mode 100644
index eb34383..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package api
-
-import (
- "sync"
-
- "github.com/coreos/etcd/version"
- "github.com/coreos/go-semver/semver"
- "github.com/coreos/pkg/capnslog"
-)
-
-type Capability string
-
-const (
- AuthCapability Capability = "auth"
- V3rpcCapability Capability = "v3rpc"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
-
- // capabilityMaps is a static map of version to capability map.
- capabilityMaps = map[string]map[Capability]bool{
- "3.0.0": {AuthCapability: true, V3rpcCapability: true},
- "3.1.0": {AuthCapability: true, V3rpcCapability: true},
- "3.2.0": {AuthCapability: true, V3rpcCapability: true},
- "3.3.0": {AuthCapability: true, V3rpcCapability: true},
- }
-
- enableMapMu sync.RWMutex
- // enabledMap points to a map in capabilityMaps
- enabledMap map[Capability]bool
-
- curVersion *semver.Version
-)
-
-func init() {
- enabledMap = map[Capability]bool{
- AuthCapability: true,
- V3rpcCapability: true,
- }
-}
-
-// UpdateCapability updates the enabledMap when the cluster version increases.
-func UpdateCapability(v *semver.Version) {
- if v == nil {
- // if recovered but version was never set by cluster
- return
- }
- enableMapMu.Lock()
- if curVersion != nil && !curVersion.LessThan(*v) {
- enableMapMu.Unlock()
- return
- }
- curVersion = v
- enabledMap = capabilityMaps[curVersion.String()]
- enableMapMu.Unlock()
- plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
-}
-
-func IsCapabilityEnabled(c Capability) bool {
- enableMapMu.RLock()
- defer enableMapMu.RUnlock()
- if enabledMap == nil {
- return false
- }
- return enabledMap[c]
-}
-
-func EnableCapability(c Capability) {
- enableMapMu.Lock()
- defer enableMapMu.Unlock()
- enabledMap[c] = true
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
deleted file mode 100644
index 654c258..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package api
-
-import (
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/go-semver/semver"
-)
-
-// Cluster is an interface representing a collection of members in one etcd cluster.
-type Cluster interface {
- // ID returns the cluster ID
- ID() types.ID
- // ClientURLs returns an aggregate set of all URLs on which this
- // cluster is listening for client requests
- ClientURLs() []string
- // Members returns a slice of members sorted by their ID
- Members() []*membership.Member
- // Member retrieves a particular member based on ID, or nil if the
- // member does not exist in the cluster
- Member(id types.ID) *membership.Member
- // Version is the cluster-wide minimum major.minor version.
- Version() *semver.Version
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/doc.go
deleted file mode 100644
index f44881b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
-package api
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go
deleted file mode 100644
index f0d3b0b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "encoding/json"
- "expvar"
- "fmt"
- "net/http"
- "strings"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/pkg/logutil"
- "github.com/coreos/etcd/version"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp")
- mlog = logutil.NewMergeLogger(plog)
-)
-
-const (
- configPath = "/config"
- varsPath = "/debug/vars"
- versionPath = "/version"
-)
-
-// HandleBasic adds handlers to a mux for serving JSON etcd client requests
-// that do not access the v2 store.
-func HandleBasic(mux *http.ServeMux, server etcdserver.ServerPeer) {
- mux.HandleFunc(varsPath, serveVars)
- mux.HandleFunc(configPath+"/local/log", logHandleFunc)
- HandleMetricsHealth(mux, server)
- mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
-}
-
-func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- v := c.Version()
- if v != nil {
- fn(w, r, v.String())
- } else {
- fn(w, r, "not_decided")
- }
- }
-}
-
-func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
- if !allowMethod(w, r, "GET") {
- return
- }
- vs := version.Versions{
- Server: version.Version,
- Cluster: clusterV,
- }
-
- w.Header().Set("Content-Type", "application/json")
- b, err := json.Marshal(&vs)
- if err != nil {
- plog.Panicf("cannot marshal versions to json (%v)", err)
- }
- w.Write(b)
-}
-
-func logHandleFunc(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "PUT") {
- return
- }
-
- in := struct{ Level string }{}
-
- d := json.NewDecoder(r.Body)
- if err := d.Decode(&in); err != nil {
- WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
- return
- }
-
- logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
- if err != nil {
- WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
- return
- }
-
- plog.Noticef("globalLogLevel set to %q", logl.String())
- capnslog.SetGlobalLogLevel(logl)
- w.WriteHeader(http.StatusNoContent)
-}
-
-func serveVars(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "GET") {
- return
- }
-
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
- if m == r.Method {
- return true
- }
- w.Header().Set("Allow", m)
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
-
-// WriteError logs and writes the given Error to the ResponseWriter
-// If Error is an etcdErr, it is rendered to the ResponseWriter
-// Otherwise, it is assumed to be a StatusInternalServerError
-func WriteError(w http.ResponseWriter, r *http.Request, err error) {
- if err == nil {
- return
- }
- switch e := err.(type) {
- case *etcdErr.Error:
- e.WriteTo(w)
- case *httptypes.HTTPError:
- if et := e.WriteTo(w); et != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
- }
- default:
- switch err {
- case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
- mlog.MergeError(err)
- default:
- mlog.MergeErrorf("got unexpected response error (%v)", err)
- }
- herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
- if et := herr.WriteTo(w); et != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go
deleted file mode 100644
index a03b626..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package etcdhttp implements HTTP transportation layer for etcdserver.
-package etcdhttp
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go
deleted file mode 100644
index aeaf350..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "context"
- "encoding/json"
- "net/http"
- "time"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/raft"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-const (
- pathMetrics = "/metrics"
- PathHealth = "/health"
-)
-
-// HandleMetricsHealth registers metrics and health handlers.
-func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
- mux.Handle(pathMetrics, promhttp.Handler())
- mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
-}
-
-// HandlePrometheus registers prometheus handler on '/metrics'.
-func HandlePrometheus(mux *http.ServeMux) {
- mux.Handle(pathMetrics, promhttp.Handler())
-}
-
-// NewHealthHandler handles '/health' requests.
-func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- if r.Method != http.MethodGet {
- w.Header().Set("Allow", http.MethodGet)
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return
- }
- h := hfunc()
- d, _ := json.Marshal(h)
- if h.Health != "true" {
- http.Error(w, string(d), http.StatusServiceUnavailable)
- return
- }
- w.WriteHeader(http.StatusOK)
- w.Write(d)
- }
-}
-
-var (
- healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "health_success",
- Help: "The total number of successful health checks",
- })
- healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "health_failures",
- Help: "The total number of failed health checks",
- })
-)
-
-func init() {
- prometheus.MustRegister(healthSuccess)
- prometheus.MustRegister(healthFailed)
-}
-
-// Health defines etcd server health status.
-// TODO: remove manual parsing in etcdctl cluster-health
-type Health struct {
- Health string `json:"health"`
-}
-
-// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
-
-func checkHealth(srv etcdserver.ServerV2) Health {
- h := Health{Health: "true"}
-
- as := srv.Alarms()
- if len(as) > 0 {
- h.Health = "false"
- }
-
- if h.Health == "true" {
- if uint64(srv.Leader()) == raft.None {
- h.Health = "false"
- }
- }
-
- if h.Health == "true" {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- _, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
- cancel()
- if err != nil {
- h.Health = "false"
- }
- }
-
- if h.Health == "true" {
- healthSuccess.Inc()
- } else {
- healthFailed.Inc()
- }
- return h
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go
deleted file mode 100644
index 0a9213b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "encoding/json"
- "net/http"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/lease/leasehttp"
- "github.com/coreos/etcd/rafthttp"
-)
-
-const (
- peerMembersPrefix = "/members"
-)
-
-// NewPeerHandler generates an http.Handler to handle etcd peer requests.
-func NewPeerHandler(s etcdserver.ServerPeer) http.Handler {
- return newPeerHandler(s.Cluster(), s.RaftHandler(), s.LeaseHandler())
-}
-
-func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler http.Handler) http.Handler {
- mh := &peerMembersHandler{
- cluster: cluster,
- }
-
- mux := http.NewServeMux()
- mux.HandleFunc("/", http.NotFound)
- mux.Handle(rafthttp.RaftPrefix, raftHandler)
- mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
- mux.Handle(peerMembersPrefix, mh)
- if leaseHandler != nil {
- mux.Handle(leasehttp.LeasePrefix, leaseHandler)
- mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
- }
- mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
- return mux
-}
-
-type peerMembersHandler struct {
- cluster api.Cluster
-}
-
-func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "GET") {
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
- if r.URL.Path != peerMembersPrefix {
- http.Error(w, "bad path", http.StatusBadRequest)
- return
- }
- ms := h.cluster.Members()
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(ms); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go
deleted file mode 100644
index fa0bcca..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "fmt"
- "net/http"
-
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
-)
-
-func capabilityHandler(c api.Capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- if !api.IsCapabilityEnabled(c) {
- notCapable(w, r, c)
- return
- }
- fn(w, r)
- }
-}
-
-func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
- herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
- if err := herr.WriteTo(w); err != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
deleted file mode 100644
index 6aaf3db..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "path"
- "strconv"
- "strings"
- "time"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/auth"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/store"
-
- "github.com/jonboulle/clockwork"
-)
-
-const (
- authPrefix = "/v2/auth"
- keysPrefix = "/v2/keys"
- machinesPrefix = "/v2/machines"
- membersPrefix = "/v2/members"
- statsPrefix = "/v2/stats"
-)
-
-// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
-func NewClientHandler(server etcdserver.ServerPeer, timeout time.Duration) http.Handler {
- mux := http.NewServeMux()
- etcdhttp.HandleBasic(mux, server)
- handleV2(mux, server, timeout)
- return requestLogger(mux)
-}
-
-func handleV2(mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) {
- sec := auth.NewStore(server, timeout)
- kh := &keysHandler{
- sec: sec,
- server: server,
- cluster: server.Cluster(),
- timeout: timeout,
- clientCertAuthEnabled: server.ClientCertAuthEnabled(),
- }
-
- sh := &statsHandler{
- stats: server,
- }
-
- mh := &membersHandler{
- sec: sec,
- server: server,
- cluster: server.Cluster(),
- timeout: timeout,
- clock: clockwork.NewRealClock(),
- clientCertAuthEnabled: server.ClientCertAuthEnabled(),
- }
-
- mah := &machinesHandler{cluster: server.Cluster()}
-
- sech := &authHandler{
- sec: sec,
- cluster: server.Cluster(),
- clientCertAuthEnabled: server.ClientCertAuthEnabled(),
- }
- mux.HandleFunc("/", http.NotFound)
- mux.Handle(keysPrefix, kh)
- mux.Handle(keysPrefix+"/", kh)
- mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
- mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
- mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
- mux.Handle(membersPrefix, mh)
- mux.Handle(membersPrefix+"/", mh)
- mux.Handle(machinesPrefix, mah)
- handleAuth(mux, sech)
-}
-
-type keysHandler struct {
- sec auth.Store
- server etcdserver.ServerV2
- cluster api.Cluster
- timeout time.Duration
- clientCertAuthEnabled bool
-}
-
-func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
- ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
- defer cancel()
- clock := clockwork.NewRealClock()
- startTime := clock.Now()
- rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
- if err != nil {
- writeKeyError(w, err)
- return
- }
- // The path must be valid at this point (we've parsed the request successfully).
- if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
- writeKeyNoAuth(w)
- return
- }
- if !rr.Wait {
- reportRequestReceived(rr)
- }
- resp, err := h.server.Do(ctx, rr)
- if err != nil {
- err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
- writeKeyError(w, err)
- reportRequestFailed(rr, err)
- return
- }
- switch {
- case resp.Event != nil:
- if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil {
- // Should never be reached
- plog.Errorf("error writing event (%v)", err)
- }
- reportRequestCompleted(rr, resp, startTime)
- case resp.Watcher != nil:
- ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
- defer cancel()
- handleKeyWatch(ctx, w, resp, rr.Stream)
- default:
- writeKeyError(w, errors.New("received response with no Event/Watcher!"))
- }
-}
-
-type machinesHandler struct {
- cluster api.Cluster
-}
-
-func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "HEAD") {
- return
- }
- endpoints := h.cluster.ClientURLs()
- w.Write([]byte(strings.Join(endpoints, ", ")))
-}
-
-type membersHandler struct {
- sec auth.Store
- server etcdserver.ServerV2
- cluster api.Cluster
- timeout time.Duration
- clock clockwork.Clock
- clientCertAuthEnabled bool
-}
-
-func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
- return
- }
- if !hasWriteRootAccess(h.sec, r, h.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
- ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
- defer cancel()
-
- switch r.Method {
- case "GET":
- switch trimPrefix(r.URL.Path, membersPrefix) {
- case "":
- mc := newMemberCollection(h.cluster.Members())
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(mc); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
- case "leader":
- id := h.server.Leader()
- if id == 0 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
- return
- }
- m := newMember(h.cluster.Member(id))
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(m); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
- default:
- writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
- }
- case "POST":
- req := httptypes.MemberCreateRequest{}
- if ok := unmarshalRequest(r, &req, w); !ok {
- return
- }
- now := h.clock.Now()
- m := membership.NewMember("", req.PeerURLs, "", &now)
- _, err := h.server.AddMember(ctx, *m)
- switch {
- case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
- writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
- return
- case err != nil:
- plog.Errorf("error adding member %s (%v)", m.ID, err)
- writeError(w, r, err)
- return
- }
- res := newMember(m)
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusCreated)
- if err := json.NewEncoder(w).Encode(res); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
- case "DELETE":
- id, ok := getID(r.URL.Path, w)
- if !ok {
- return
- }
- _, err := h.server.RemoveMember(ctx, uint64(id))
- switch {
- case err == membership.ErrIDRemoved:
- writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
- case err == membership.ErrIDNotFound:
- writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
- case err != nil:
- plog.Errorf("error removing member %s (%v)", id, err)
- writeError(w, r, err)
- default:
- w.WriteHeader(http.StatusNoContent)
- }
- case "PUT":
- id, ok := getID(r.URL.Path, w)
- if !ok {
- return
- }
- req := httptypes.MemberUpdateRequest{}
- if ok := unmarshalRequest(r, &req, w); !ok {
- return
- }
- m := membership.Member{
- ID: id,
- RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
- }
- _, err := h.server.UpdateMember(ctx, m)
- switch {
- case err == membership.ErrPeerURLexists:
- writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
- case err == membership.ErrIDNotFound:
- writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
- case err != nil:
- plog.Errorf("error updating member %s (%v)", m.ID, err)
- writeError(w, r, err)
- default:
- w.WriteHeader(http.StatusNoContent)
- }
- }
-}
-
-type statsHandler struct {
- stats stats.Stats
-}
-
-func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.Write(h.stats.StoreStats())
-}
-
-func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.Write(h.stats.SelfStats())
-}
-
-func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- stats := h.stats.LeaderStats()
- if stats == nil {
- etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.Write(stats)
-}
-
-// parseKeyRequest converts a received http.Request on keysPrefix to
-// a server Request, performing validation of supplied fields as appropriate.
-// If any validation fails, an empty Request and non-nil error is returned.
-func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
- var noValueOnSuccess bool
- emptyReq := etcdserverpb.Request{}
-
- err := r.ParseForm()
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidForm,
- err.Error(),
- )
- }
-
- if !strings.HasPrefix(r.URL.Path, keysPrefix) {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidForm,
- "incorrect key prefix",
- )
- }
- p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):])
-
- var pIdx, wIdx uint64
- if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeIndexNaN,
- `invalid value for "prevIndex"`,
- )
- }
- if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeIndexNaN,
- `invalid value for "waitIndex"`,
- )
- }
-
- var rec, sort, wait, dir, quorum, stream bool
- if rec, err = getBool(r.Form, "recursive"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "recursive"`,
- )
- }
- if sort, err = getBool(r.Form, "sorted"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "sorted"`,
- )
- }
- if wait, err = getBool(r.Form, "wait"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "wait"`,
- )
- }
- // TODO(jonboulle): define what parameters dir is/isn't compatible with?
- if dir, err = getBool(r.Form, "dir"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "dir"`,
- )
- }
- if quorum, err = getBool(r.Form, "quorum"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "quorum"`,
- )
- }
- if stream, err = getBool(r.Form, "stream"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "stream"`,
- )
- }
-
- if wait && r.Method != "GET" {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `"wait" can only be used with GET requests`,
- )
- }
-
- pV := r.FormValue("prevValue")
- if _, ok := r.Form["prevValue"]; ok && pV == "" {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodePrevValueRequired,
- `"prevValue" cannot be empty`,
- )
- }
-
- if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "noValueOnSuccess"`,
- )
- }
-
- // TTL is nullable, so leave it null if not specified
- // or an empty string
- var ttl *uint64
- if len(r.FormValue("ttl")) > 0 {
- i, err := getUint64(r.Form, "ttl")
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeTTLNaN,
- `invalid value for "ttl"`,
- )
- }
- ttl = &i
- }
-
- // prevExist is nullable, so leave it null if not specified
- var pe *bool
- if _, ok := r.Form["prevExist"]; ok {
- bv, err := getBool(r.Form, "prevExist")
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- "invalid value for prevExist",
- )
- }
- pe = &bv
- }
-
- // refresh is nullable, so leave it null if not specified
- var refresh *bool
- if _, ok := r.Form["refresh"]; ok {
- bv, err := getBool(r.Form, "refresh")
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- "invalid value for refresh",
- )
- }
- refresh = &bv
- if refresh != nil && *refresh {
- val := r.FormValue("value")
- if _, ok := r.Form["value"]; ok && val != "" {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeRefreshValue,
- `A value was provided on a refresh`,
- )
- }
- if ttl == nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeRefreshTTLRequired,
- `No TTL value set`,
- )
- }
- }
- }
-
- rr := etcdserverpb.Request{
- Method: r.Method,
- Path: p,
- Val: r.FormValue("value"),
- Dir: dir,
- PrevValue: pV,
- PrevIndex: pIdx,
- PrevExist: pe,
- Wait: wait,
- Since: wIdx,
- Recursive: rec,
- Sorted: sort,
- Quorum: quorum,
- Stream: stream,
- }
-
- if pe != nil {
- rr.PrevExist = pe
- }
-
- if refresh != nil {
- rr.Refresh = refresh
- }
-
- // Null TTL is equivalent to unset Expiration
- if ttl != nil {
- expr := time.Duration(*ttl) * time.Second
- rr.Expiration = clock.Now().Add(expr).UnixNano()
- }
-
- return rr, noValueOnSuccess, nil
-}
-
-// writeKeyEvent trims the prefix of key path in a single Event under
-// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
-// ResponseWriter, along with the appropriate headers.
-func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
- ev := resp.Event
- if ev == nil {
- return errors.New("cannot write empty Event!")
- }
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
- w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
- w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
-
- if ev.IsCreated() {
- w.WriteHeader(http.StatusCreated)
- }
-
- ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
- if noValueOnSuccess &&
- (ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
- ev.Action == store.Create || ev.Action == store.Update) {
- ev.Node = nil
- ev.PrevNode = nil
- }
- return json.NewEncoder(w).Encode(ev)
-}
-
-func writeKeyNoAuth(w http.ResponseWriter) {
- e := etcdErr.NewError(etcdErr.EcodeUnauthorized, "Insufficient credentials", 0)
- e.WriteTo(w)
-}
-
-// writeKeyError logs and writes the given Error to the ResponseWriter.
-// If Error is not an etcdErr, the error will be converted to an etcd error.
-func writeKeyError(w http.ResponseWriter, err error) {
- if err == nil {
- return
- }
- switch e := err.(type) {
- case *etcdErr.Error:
- e.WriteTo(w)
- default:
- switch err {
- case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
- mlog.MergeError(err)
- default:
- mlog.MergeErrorf("got unexpected response error (%v)", err)
- }
- ee := etcdErr.NewError(etcdErr.EcodeRaftInternal, err.Error(), 0)
- ee.WriteTo(w)
- }
-}
-
-func handleKeyWatch(ctx context.Context, w http.ResponseWriter, resp etcdserver.Response, stream bool) {
- wa := resp.Watcher
- defer wa.Remove()
- ech := wa.EventChan()
- var nch <-chan bool
- if x, ok := w.(http.CloseNotifier); ok {
- nch = x.CloseNotify()
- }
-
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
- w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
- w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
- w.WriteHeader(http.StatusOK)
-
- // Ensure headers are flushed early, in case of long polling
- w.(http.Flusher).Flush()
-
- for {
- select {
- case <-nch:
- // Client closed connection. Nothing to do.
- return
- case <-ctx.Done():
- // Timed out. net/http will close the connection for us, so nothing to do.
- return
- case ev, ok := <-ech:
- if !ok {
- // If the channel is closed, it may indicate that events are
- // arriving faster than we are able to send them to the client.
- // In that case we simply end streaming.
- return
- }
- ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
- if err := json.NewEncoder(w).Encode(ev); err != nil {
- // Should never be reached
- plog.Warningf("error writing event (%v)", err)
- return
- }
- if !stream {
- return
- }
- w.(http.Flusher).Flush()
- }
- }
-}
-
-func trimEventPrefix(ev *store.Event, prefix string) *store.Event {
- if ev == nil {
- return nil
- }
- // Since the *Event may reference one in the store history,
- // we must copy it before modifying it
- e := ev.Clone()
- trimNodeExternPrefix(e.Node, prefix)
- trimNodeExternPrefix(e.PrevNode, prefix)
- return e
-}
-
-func trimNodeExternPrefix(n *store.NodeExtern, prefix string) {
- if n == nil {
- return
- }
- n.Key = strings.TrimPrefix(n.Key, prefix)
- for _, nn := range n.Nodes {
- trimNodeExternPrefix(nn, prefix)
- }
-}
-
-func trimErrorPrefix(err error, prefix string) error {
- if e, ok := err.(*etcdErr.Error); ok {
- e.Cause = strings.TrimPrefix(e.Cause, prefix)
- }
- return err
-}
-
-func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
- ctype := r.Header.Get("Content-Type")
- semicolonPosition := strings.Index(ctype, ";")
- if semicolonPosition != -1 {
- ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
- }
- if ctype != "application/json" {
- writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
- return false
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
- return false
- }
- if err := req.UnmarshalJSON(b); err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
- return false
- }
- return true
-}
-
-func getID(p string, w http.ResponseWriter) (types.ID, bool) {
- idStr := trimPrefix(p, membersPrefix)
- if idStr == "" {
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return 0, false
- }
- id, err := types.IDFromString(idStr)
- if err != nil {
- writeError(w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
- return 0, false
- }
- return id, true
-}
-
-// getUint64 extracts a uint64 by the given key from a Form. If the key does
-// not exist in the form, 0 is returned. If the key exists but the value is
-// badly formed, an error is returned. If multiple values are present only the
-// first is considered.
-func getUint64(form url.Values, key string) (i uint64, err error) {
- if vals, ok := form[key]; ok {
- i, err = strconv.ParseUint(vals[0], 10, 64)
- }
- return
-}
-
-// getBool extracts a bool by the given key from a Form. If the key does not
-// exist in the form, false is returned. If the key exists but the value is
-// badly formed, an error is returned. If multiple values are present only the
-// first is considered.
-func getBool(form url.Values, key string) (b bool, err error) {
- if vals, ok := form[key]; ok {
- b, err = strconv.ParseBool(vals[0])
- }
- return
-}
-
-// trimPrefix removes a given prefix and any slash following the prefix
-// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == ""
-func trimPrefix(p, prefix string) (s string) {
- s = strings.TrimPrefix(p, prefix)
- s = strings.TrimPrefix(s, "/")
- return
-}
-
-func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection {
- c := httptypes.MemberCollection(make([]httptypes.Member, len(ms)))
-
- for i, m := range ms {
- c[i] = newMember(m)
- }
-
- return &c
-}
-
-func newMember(m *membership.Member) httptypes.Member {
- tm := httptypes.Member{
- ID: m.ID.String(),
- Name: m.Name,
- PeerURLs: make([]string, len(m.PeerURLs)),
- ClientURLs: make([]string, len(m.ClientURLs)),
- }
-
- copy(tm.PeerURLs, m.PeerURLs)
- copy(tm.ClientURLs, m.ClientURLs)
-
- return tm
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go
deleted file mode 100644
index 606e2e0..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go
+++ /dev/null
@@ -1,543 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "encoding/json"
- "net/http"
- "path"
- "strings"
-
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/auth"
-)
-
-type authHandler struct {
- sec auth.Store
- cluster api.Cluster
- clientCertAuthEnabled bool
-}
-
-func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
- if r.Method == "GET" || r.Method == "HEAD" {
- return true
- }
- return hasRootAccess(sec, r, clientCertAuthEnabled)
-}
-
-func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User {
- username, password, ok := r.BasicAuth()
- if !ok {
- plog.Warningf("auth: malformed basic auth encoding")
- return nil
- }
- user, err := sec.GetUser(username)
- if err != nil {
- return nil
- }
-
- ok = sec.CheckPassword(user, password)
- if !ok {
- plog.Warningf("auth: incorrect password for user: %s", username)
- return nil
- }
- return &user
-}
-
-func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User {
- if r.TLS == nil {
- return nil
- }
-
- for _, chains := range r.TLS.VerifiedChains {
- for _, chain := range chains {
- plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName)
- user, err := sec.GetUser(chain.Subject.CommonName)
- if err == nil {
- plog.Debugf("auth: authenticated user %s by cert common name.", user.User)
- return &user
- }
- }
- }
- return nil
-}
-
-func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
- if sec == nil {
- // No store means no auth is available, e.g., in tests.
- return true
- }
- if !sec.AuthEnabled() {
- return true
- }
-
- var rootUser *auth.User
- if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
- rootUser = userFromClientCertificate(sec, r)
- if rootUser == nil {
- return false
- }
- } else {
- rootUser = userFromBasicAuth(sec, r)
- if rootUser == nil {
- return false
- }
- }
-
- for _, role := range rootUser.Roles {
- if role == auth.RootRoleName {
- return true
- }
- }
- plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path)
- return false
-}
-
-func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
- if sec == nil {
- // No store means no auth is available, e.g., in tests.
- return true
- }
- if !sec.AuthEnabled() {
- return true
- }
-
- var user *auth.User
- if r.Header.Get("Authorization") == "" {
- if clientCertAuthEnabled {
- user = userFromClientCertificate(sec, r)
- }
- if user == nil {
- return hasGuestAccess(sec, r, key)
- }
- } else {
- user = userFromBasicAuth(sec, r)
- if user == nil {
- return false
- }
- }
-
- writeAccess := r.Method != "GET" && r.Method != "HEAD"
- for _, roleName := range user.Roles {
- role, err := sec.GetRole(roleName)
- if err != nil {
- continue
- }
- if recursive {
- if role.HasRecursiveAccess(key, writeAccess) {
- return true
- }
- } else if role.HasKeyAccess(key, writeAccess) {
- return true
- }
- }
- plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key)
- return false
-}
-
-func hasGuestAccess(sec auth.Store, r *http.Request, key string) bool {
- writeAccess := r.Method != "GET" && r.Method != "HEAD"
- role, err := sec.GetRole(auth.GuestRoleName)
- if err != nil {
- return false
- }
- if role.HasKeyAccess(key, writeAccess) {
- return true
- }
- plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key)
- return false
-}
-
-func writeNoAuth(w http.ResponseWriter, r *http.Request) {
- herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
- if err := herr.WriteTo(w); err != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
- }
-}
-
-func handleAuth(mux *http.ServeMux, sh *authHandler) {
- mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles))
- mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles))
- mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers))
- mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers))
- mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable))
-}
-
-func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- roles, err := sh.sec.AllRoles()
- if err != nil {
- writeError(w, r, err)
- return
- }
- if roles == nil {
- roles = make([]string, 0)
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- var rolesCollections struct {
- Roles []auth.Role `json:"roles"`
- }
- for _, roleName := range roles {
- var role auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- writeError(w, r, err)
- return
- }
- rolesCollections.Roles = append(rolesCollections.Roles, role)
- }
- err = json.NewEncoder(w).Encode(rolesCollections)
-
- if err != nil {
- plog.Warningf("baseRoles error encoding on %s", r.URL)
- writeError(w, r, err)
- return
- }
-}
-
-func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
- subpath := path.Clean(r.URL.Path[len(authPrefix):])
- // Split "/roles/rolename/command".
- // First item is an empty string, second is "roles"
- pieces := strings.Split(subpath, "/")
- if len(pieces) == 2 {
- sh.baseRoles(w, r)
- return
- }
- if len(pieces) != 3 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
- return
- }
- sh.forRole(w, r, pieces[2])
-}
-
-func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- switch r.Method {
- case "GET":
- data, err := sh.sec.GetRole(role)
- if err != nil {
- writeError(w, r, err)
- return
- }
- err = json.NewEncoder(w).Encode(data)
- if err != nil {
- plog.Warningf("forRole error encoding on %s", r.URL)
- return
- }
- return
- case "PUT":
- var in auth.Role
- err := json.NewDecoder(r.Body).Decode(&in)
- if err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
- return
- }
- if in.Role != role {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
- return
- }
-
- var out auth.Role
-
- // create
- if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
- err = sh.sec.CreateRole(in)
- if err != nil {
- writeError(w, r, err)
- return
- }
- w.WriteHeader(http.StatusCreated)
- out = in
- } else {
- if !in.Permissions.IsEmpty() {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
- return
- }
- out, err = sh.sec.UpdateRole(in)
- if err != nil {
- writeError(w, r, err)
- return
- }
- w.WriteHeader(http.StatusOK)
- }
-
- err = json.NewEncoder(w).Encode(out)
- if err != nil {
- plog.Warningf("forRole error encoding on %s", r.URL)
- return
- }
- return
- case "DELETE":
- err := sh.sec.DeleteRole(role)
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-}
-
-type userWithRoles struct {
- User string `json:"user"`
- Roles []auth.Role `json:"roles,omitempty"`
-}
-
-type usersCollections struct {
- Users []userWithRoles `json:"users"`
-}
-
-func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- users, err := sh.sec.AllUsers()
- if err != nil {
- writeError(w, r, err)
- return
- }
- if users == nil {
- users = make([]string, 0)
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- ucs := usersCollections{}
- for _, userName := range users {
- var user auth.User
- user, err = sh.sec.GetUser(userName)
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- uwr := userWithRoles{User: user.User}
- for _, roleName := range user.Roles {
- var role auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- continue
- }
- uwr.Roles = append(uwr.Roles, role)
- }
-
- ucs.Users = append(ucs.Users, uwr)
- }
- err = json.NewEncoder(w).Encode(ucs)
-
- if err != nil {
- plog.Warningf("baseUsers error encoding on %s", r.URL)
- writeError(w, r, err)
- return
- }
-}
-
-func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
- subpath := path.Clean(r.URL.Path[len(authPrefix):])
- // Split "/users/username".
- // First item is an empty string, second is "users"
- pieces := strings.Split(subpath, "/")
- if len(pieces) == 2 {
- sh.baseUsers(w, r)
- return
- }
- if len(pieces) != 3 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
- return
- }
- sh.forUser(w, r, pieces[2])
-}
-
-func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- switch r.Method {
- case "GET":
- u, err := sh.sec.GetUser(user)
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- uwr := userWithRoles{User: u.User}
- for _, roleName := range u.Roles {
- var role auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- writeError(w, r, err)
- return
- }
- uwr.Roles = append(uwr.Roles, role)
- }
- err = json.NewEncoder(w).Encode(uwr)
-
- if err != nil {
- plog.Warningf("forUser error encoding on %s", r.URL)
- return
- }
- return
- case "PUT":
- var u auth.User
- err := json.NewDecoder(r.Body).Decode(&u)
- if err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
- return
- }
- if u.User != user {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
- return
- }
-
- var (
- out auth.User
- created bool
- )
-
- if len(u.Grant) == 0 && len(u.Revoke) == 0 {
- // create or update
- if len(u.Roles) != 0 {
- out, err = sh.sec.CreateUser(u)
- } else {
- // if user passes in both password and roles, we are unsure about his/her
- // intention.
- out, created, err = sh.sec.CreateOrUpdateUser(u)
- }
-
- if err != nil {
- writeError(w, r, err)
- return
- }
- } else {
- // update case
- if len(u.Roles) != 0 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
- return
- }
- out, err = sh.sec.UpdateUser(u)
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-
- if created {
- w.WriteHeader(http.StatusCreated)
- } else {
- w.WriteHeader(http.StatusOK)
- }
-
- out.Password = ""
-
- err = json.NewEncoder(w).Encode(out)
- if err != nil {
- plog.Warningf("forUser error encoding on %s", r.URL)
- return
- }
- return
- case "DELETE":
- err := sh.sec.DeleteUser(user)
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-}
-
-type enabled struct {
- Enabled bool `json:"enabled"`
-}
-
-func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
- isEnabled := sh.sec.AuthEnabled()
- switch r.Method {
- case "GET":
- jsonDict := enabled{isEnabled}
- err := json.NewEncoder(w).Encode(jsonDict)
- if err != nil {
- plog.Warningf("error encoding auth state on %s", r.URL)
- }
- case "PUT":
- err := sh.sec.EnableAuth()
- if err != nil {
- writeError(w, r, err)
- return
- }
- case "DELETE":
- err := sh.sec.DisableAuth()
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go
deleted file mode 100644
index 475c4b1..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2http provides etcd client and server implementations.
-package v2http
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
deleted file mode 100644
index 589c172..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "math"
- "net/http"
- "strings"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/auth"
- "github.com/coreos/etcd/pkg/logutil"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
- // time to wait for a Watch request
- defaultWatchTimeout = time.Duration(math.MaxInt64)
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
- mlog = logutil.NewMergeLogger(plog)
-)
-
-func writeError(w http.ResponseWriter, r *http.Request, err error) {
- if err == nil {
- return
- }
- if e, ok := err.(auth.Error); ok {
- herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
- if et := herr.WriteTo(w); et != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
- }
- return
- }
- etcdhttp.WriteError(w, r, err)
-}
-
-// allowMethod verifies that the given method is one of the allowed methods,
-// and if not, it writes an error to w. A boolean is returned indicating
-// whether or not the method is allowed.
-func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
- for _, meth := range ms {
- if m == meth {
- return true
- }
- }
- w.Header().Set("Allow", strings.Join(ms, ","))
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
-
-func requestLogger(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr)
- handler.ServeHTTP(w, r)
- })
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
deleted file mode 100644
index 0657604..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptypes
-
-import (
- "encoding/json"
- "net/http"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
-)
-
-type HTTPError struct {
- Message string `json:"message"`
- // Code is the HTTP status code
- Code int `json:"-"`
-}
-
-func (e HTTPError) Error() string {
- return e.Message
-}
-
-func (e HTTPError) WriteTo(w http.ResponseWriter) error {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(e.Code)
- b, err := json.Marshal(e)
- if err != nil {
- plog.Panicf("marshal HTTPError should never fail (%v)", err)
- }
- if _, err := w.Write(b); err != nil {
- return err
- }
- return nil
-}
-
-func NewHTTPError(code int, m string) *HTTPError {
- return &HTTPError{
- Message: m,
- Code: code,
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go
deleted file mode 100644
index 738d744..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package httptypes defines how etcd's HTTP API entities are serialized to and
-// deserialized from JSON.
-package httptypes
-
-import (
- "encoding/json"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-type Member struct {
- ID string `json:"id"`
- Name string `json:"name"`
- PeerURLs []string `json:"peerURLs"`
- ClientURLs []string `json:"clientURLs"`
-}
-
-type MemberCreateRequest struct {
- PeerURLs types.URLs
-}
-
-type MemberUpdateRequest struct {
- MemberCreateRequest
-}
-
-func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
- s := struct {
- PeerURLs []string `json:"peerURLs"`
- }{}
-
- err := json.Unmarshal(data, &s)
- if err != nil {
- return err
- }
-
- urls, err := types.NewURLs(s.PeerURLs)
- if err != nil {
- return err
- }
-
- m.PeerURLs = urls
- return nil
-}
-
-type MemberCollection []Member
-
-func (c *MemberCollection) MarshalJSON() ([]byte, error) {
- d := struct {
- Members []Member `json:"members"`
- }{
- Members: []Member(*c),
- }
-
- return json.Marshal(d)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go
deleted file mode 100644
index fdfb0c6..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "strconv"
- "time"
-
- "net/http"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- incomingEvents = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "received_total",
- Help: "Counter of requests received into the system (successfully parsed and authd).",
- }, []string{"method"})
-
- failedEvents = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "failed_total",
- Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
- }, []string{"method", "code"})
-
- successfulEventsHandlingTime = prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "successful_duration_seconds",
- Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
- Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
- }, []string{"method"})
-)
-
-func init() {
- prometheus.MustRegister(incomingEvents)
- prometheus.MustRegister(failedEvents)
- prometheus.MustRegister(successfulEventsHandlingTime)
-}
-
-func reportRequestReceived(request etcdserverpb.Request) {
- incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
-}
-
-func reportRequestCompleted(request etcdserverpb.Request, response etcdserver.Response, startTime time.Time) {
- method := methodFromRequest(request)
- successfulEventsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
-}
-
-func reportRequestFailed(request etcdserverpb.Request, err error) {
- method := methodFromRequest(request)
- failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
-}
-
-func methodFromRequest(request etcdserverpb.Request) string {
- if request.Method == "GET" && request.Quorum {
- return "QGET"
- }
- return request.Method
-}
-
-func codeFromError(err error) int {
- if err == nil {
- return http.StatusInternalServerError
- }
- switch e := err.(type) {
- case *etcdErr.Error:
- return (*etcdErr.Error)(e).StatusCode()
- case *httptypes.HTTPError:
- return (*httptypes.HTTPError)(e).Code
- default:
- return http.StatusInternalServerError
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go
deleted file mode 100644
index b53e6d7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/go-semver/semver"
-)
-
-func (s *v2v3Server) ID() types.ID {
- // TODO: use an actual member ID
- return types.ID(0xe7cd2f00d)
-}
-func (s *v2v3Server) ClientURLs() []string { panic("STUB") }
-func (s *v2v3Server) Members() []*membership.Member { panic("STUB") }
-func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") }
-func (s *v2v3Server) Version() *semver.Version { panic("STUB") }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go
deleted file mode 100644
index 2ff372f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client.
-package v2v3
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go
deleted file mode 100644
index 2ef63ce..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "net/http"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/go-semver/semver"
-)
-
-type fakeStats struct{}
-
-func (s *fakeStats) SelfStats() []byte { return nil }
-func (s *fakeStats) LeaderStats() []byte { return nil }
-func (s *fakeStats) StoreStats() []byte { return nil }
-
-type v2v3Server struct {
- c *clientv3.Client
- store *v2v3Store
- fakeStats
-}
-
-func NewServer(c *clientv3.Client, pfx string) etcdserver.ServerPeer {
- return &v2v3Server{c: c, store: newStore(c, pfx)}
-}
-
-func (s *v2v3Server) ClientCertAuthEnabled() bool { return false }
-
-func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") }
-func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") }
-
-func (s *v2v3Server) Leader() types.ID {
- ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
- defer cancel()
- resp, err := s.c.Status(ctx, s.c.Endpoints()[0])
- if err != nil {
- return 0
- }
- return types.ID(resp.Leader)
-}
-
-func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- resp, err := s.c.MemberAdd(ctx, memb.PeerURLs)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- resp, err := s.c.MemberRemove(ctx, id)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
- resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member {
- membs := make([]*membership.Member, len(v3membs))
- for i, m := range v3membs {
- membs[i] = &membership.Member{
- ID: types.ID(m.ID),
- RaftAttributes: membership.RaftAttributes{
- PeerURLs: m.PeerURLs,
- },
- Attributes: membership.Attributes{
- Name: m.Name,
- ClientURLs: m.ClientURLs,
- },
- }
- }
- return membs
-}
-
-func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() }
-func (s *v2v3Server) Cluster() api.Cluster { return s }
-func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil }
-
-func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
- applier := etcdserver.NewApplierV2(s.store, nil)
- reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
- req := (*etcdserver.RequestV2)(&r)
- resp, err := req.Handle(ctx, reqHandler)
- if resp.Err != nil {
- return resp, resp.Err
- }
- return resp, err
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go
deleted file mode 100644
index 444f93f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go
+++ /dev/null
@@ -1,620 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "fmt"
- "path"
- "strings"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/store"
-)
-
-// store implements the Store interface for V2 using
-// a v3 client.
-type v2v3Store struct {
- c *clientv3.Client
- // pfx is the v3 prefix where keys should be stored.
- pfx string
- ctx context.Context
-}
-
-const maxPathDepth = 63
-
-var errUnsupported = fmt.Errorf("TTLs are unsupported")
-
-func NewStore(c *clientv3.Client, pfx string) store.Store { return newStore(c, pfx) }
-
-func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} }
-
-func (s *v2v3Store) Index() uint64 { panic("STUB") }
-
-func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*store.Event, error) {
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).Then(
- clientv3.OpGet(key+"/"),
- clientv3.OpGet(key),
- ).Commit()
- if err != nil {
- return nil, err
- }
-
- if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) {
- nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision)
- if err != nil {
- return nil, err
- }
- cidx, midx := uint64(0), uint64(0)
- if len(kvs) > 0 {
- cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision)
- }
- return &store.Event{
- Action: store.Get,
- Node: &store.NodeExtern{
- Key: nodePath,
- Dir: true,
- Nodes: nodes,
- CreatedIndex: cidx,
- ModifiedIndex: midx,
- },
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
- }
-
- kvs := resp.Responses[1].GetResponseRange().Kvs
- if len(kvs) == 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- return &store.Event{
- Action: store.Get,
- Node: s.mkV2Node(kvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*store.NodeExtern, error) {
- rootNodes, err := s.getDirDepth(nodePath, 1, rev)
- if err != nil || !recursive {
- return rootNodes, err
- }
- nextNodes := rootNodes
- nodes := make(map[string]*store.NodeExtern)
- // Breadth-first walk of the subdirectories
- for i := 2; len(nextNodes) > 0; i++ {
- for _, n := range nextNodes {
- nodes[n.Key] = n
- if parent := nodes[path.Dir(n.Key)]; parent != nil {
- parent.Nodes = append(parent.Nodes, n)
- }
- }
- if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil {
- return nil, err
- }
- }
- return rootNodes, nil
-}
-
-func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*store.NodeExtern, error) {
- pd := s.mkPathDepth(nodePath, depth)
- resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev))
- if err != nil {
- return nil, err
- }
-
- nodes := make([]*store.NodeExtern, len(resp.Kvs))
- for i, kv := range resp.Kvs {
- nodes[i] = s.mkV2Node(kv)
- }
- return nodes, nil
-}
-
-func (s *v2v3Store) Set(
- nodePath string,
- dir bool,
- value string,
- expireOpts store.TTLOptionSet,
-) (*store.Event, error) {
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
-
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- parent := path.Dir(nodePath)
- if !isRoot(parent) && stm.Rev(s.mkPath(parent)+"/") == 0 {
- ecode = etcdErr.EcodeKeyNotFound
- return nil
- }
-
- key := s.mkPath(nodePath)
- if dir {
- if stm.Rev(key) != 0 {
- // exists as non-dir
- ecode = etcdErr.EcodeNotDir
- return nil
- }
- key = key + "/"
- } else if stm.Rev(key+"/") != 0 {
- ecode = etcdErr.EcodeNotFile
- return nil
- }
- stm.Put(key, value, clientv3.WithPrevKV())
- stm.Put(s.mkActionKey(), store.Set)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- createRev := resp.Header.Revision
- var pn *store.NodeExtern
- if pkv := prevKeyFromPuts(resp); pkv != nil {
- pn = s.mkV2Node(pkv)
- createRev = pkv.CreateRevision
- }
-
- vp := &value
- if dir {
- vp = nil
- }
- return &store.Event{
- Action: store.Set,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: vp,
- Dir: dir,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(createRev),
- },
- PrevNode: pn,
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Update(nodePath, newValue string, expireOpts store.TTLOptionSet) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
-
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- key := s.mkPath(nodePath)
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- if rev := stm.Rev(key + "/"); rev != 0 {
- ecode = etcdErr.EcodeNotFile
- return nil
- }
- if rev := stm.Rev(key); rev == 0 {
- ecode = etcdErr.EcodeKeyNotFound
- return nil
- }
- stm.Put(key, newValue, clientv3.WithPrevKV())
- stm.Put(s.mkActionKey(), store.Update)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- pkv := prevKeyFromPuts(resp)
- return &store.Event{
- Action: store.Update,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: &newValue,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Create(
- nodePath string,
- dir bool,
- value string,
- unique bool,
- expireOpts store.TTLOptionSet,
-) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- ecode = 0
- key := s.mkPath(nodePath)
- if unique {
- // append unique item under the node path
- for {
- key = nodePath + "/" + fmt.Sprintf("%020s", time.Now())
- key = path.Clean(path.Join("/", key))
- key = s.mkPath(key)
- if stm.Rev(key) == 0 {
- break
- }
- }
- }
- if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 {
- ecode = etcdErr.EcodeNodeExist
- return nil
- }
- // build path if any directories in path do not exist
- dirs := []string{}
- for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
- pp := s.mkPath(p)
- if stm.Rev(pp) > 0 {
- ecode = etcdErr.EcodeNotDir
- return nil
- }
- if stm.Rev(pp+"/") == 0 {
- dirs = append(dirs, pp+"/")
- }
- }
- for _, d := range dirs {
- stm.Put(d, "")
- }
-
- if dir {
- // directories marked with extra slash in key name
- key += "/"
- }
- stm.Put(key, value)
- stm.Put(s.mkActionKey(), store.Create)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- var v *string
- if !dir {
- v = &value
- }
-
- return &store.Event{
- Action: store.Create,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: v,
- Dir: dir,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(resp.Header.Revision),
- },
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) CompareAndSwap(
- nodePath string,
- prevValue string,
- prevIndex uint64,
- value string,
- expireOpts store.TTLOptionSet,
-) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).If(
- s.mkCompare(nodePath, prevValue, prevIndex)...,
- ).Then(
- clientv3.OpPut(key, value, clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.CompareAndSwap),
- ).Else(
- clientv3.OpGet(key),
- clientv3.OpGet(key+"/"),
- ).Commit()
-
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, compareFail(nodePath, prevValue, prevIndex, resp)
- }
-
- pkv := resp.Responses[0].GetResponsePut().PrevKv
- return &store.Event{
- Action: store.CompareAndSwap,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: &value,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
- if !dir && !recursive {
- return s.deleteNode(nodePath)
- }
- if !recursive {
- return s.deleteEmptyDir(nodePath)
- }
-
- dels := make([]clientv3.Op, maxPathDepth+1)
- dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV())
- for i := 1; i < maxPathDepth; i++ {
- dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix())
- }
- dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), store.Delete)
-
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0),
- clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0),
- ).Then(
- dels...,
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
- }
- dresp := resp.Responses[0].GetResponseDeleteRange()
- return &store.Event{
- Action: store.Delete,
- PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) deleteEmptyDir(nodePath string) (*store.Event, error) {
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(),
- ).Then(
- clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.Delete),
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, etcdErr.NewError(etcdErr.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision))
- }
- dresp := resp.Responses[0].GetResponseDeleteRange()
- if len(dresp.PrevKvs) == 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
- }
- return &store.Event{
- Action: store.Delete,
- PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) deleteNode(nodePath string) (*store.Event, error) {
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0),
- ).Then(
- clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.Delete),
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
- pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs
- if len(pkvs) == 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
- pkv := pkvs[0]
- return &store.Event{
- Action: store.Delete,
- Node: &store.NodeExtern{
- Key: nodePath,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
-
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).If(
- s.mkCompare(nodePath, prevValue, prevIndex)...,
- ).Then(
- clientv3.OpDelete(key, clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.CompareAndDelete),
- ).Else(
- clientv3.OpGet(key),
- clientv3.OpGet(key+"/"),
- ).Commit()
-
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, compareFail(nodePath, prevValue, prevIndex, resp)
- }
-
- // PrevKvs is non-empty since the txn only succeeds when the key exists
- pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
- return &store.Event{
- Action: store.CompareAndDelete,
- Node: &store.NodeExtern{
- Key: nodePath,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error {
- if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 {
- return etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
- kvs := resp.Responses[0].GetResponseRange().Kvs
- if len(kvs) == 0 {
- return etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
- kv := kvs[0]
- indexMatch := (prevIndex == 0 || kv.ModRevision == int64(prevIndex))
- valueMatch := (prevValue == "" || string(kv.Value) == prevValue)
- var cause string
- switch {
- case indexMatch && !valueMatch:
- cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value))
- case valueMatch && !indexMatch:
- cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision)
- default:
- cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision)
- }
- return etcdErr.NewError(etcdErr.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision))
-}
-
-func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp {
- key := s.mkPath(nodePath)
- cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)}
- if prevIndex != 0 {
- cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex)))
- }
- if prevValue != "" {
- cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
- }
- return cmps
-}
-
-func (s *v2v3Store) JsonStats() []byte { panic("STUB") }
-func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") }
-
-func (s *v2v3Store) Version() int { return 2 }
-
-// TODO: move this out of the Store interface?
-
-func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") }
-func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") }
-func (s *v2v3Store) Clone() store.Store { panic("STUB") }
-func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") }
-func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") }
-
-func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) }
-
-func (s *v2v3Store) mkNodePath(p string) string {
- return path.Clean(p[len(s.pfx)+len("/k/000/"):])
-}
-
-// mkPathDepth makes a path to a key that encodes its directory depth
-// for fast directory listing. If a depth is provided, it is added
-// to the computed depth.
-func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
- normalForm := path.Clean(path.Join("/", nodePath))
- n := strings.Count(normalForm, "/") + depth
- return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
-}
-
-func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" }
-
-func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" }
-
-func mkV2Rev(v3Rev int64) uint64 {
- if v3Rev == 0 {
- return 0
- }
- return uint64(v3Rev - 1)
-}
-
-func mkV3Rev(v2Rev uint64) int64 {
- if v2Rev == 0 {
- return 0
- }
- return int64(v2Rev + 1)
-}
-
-// mkV2Node creates a V2 NodeExtern from a V3 KeyValue
-func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *store.NodeExtern {
- if kv == nil {
- return nil
- }
- n := &store.NodeExtern{
- Key: string(s.mkNodePath(string(kv.Key))),
- Dir: kv.Key[len(kv.Key)-1] == '/',
- CreatedIndex: mkV2Rev(kv.CreateRevision),
- ModifiedIndex: mkV2Rev(kv.ModRevision),
- }
- if !n.Dir {
- v := string(kv.Value)
- n.Value = &v
- }
- return n
-}
-
-// prevKeyFromPuts gets the prev key that is being put; ignores
-// the put action response.
-func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue {
- for _, r := range resp.Responses {
- pkv := r.GetResponsePut().PrevKv
- if pkv != nil && pkv.CreateRevision > 0 {
- return pkv
- }
- }
- return nil
-}
-
-func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) {
- return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable))
-}
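The v2v3 store deleted above emulates v2 compare-and-delete by guarding a clientv3 transaction with Version/ModRevision/Value comparisons (see mkCompare) and reading the old value back through WithPrevKV. A minimal, standalone sketch of that same transaction pattern follows; it is not part of this change, and the endpoint `localhost:2379`, the key `/demo/key`, and the expected value are placeholders.
```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	key, prevValue := "/demo/key", "old"

	// Guard the delete the same way the removed mkCompare helper does:
	// the key must exist and must still hold the expected value.
	resp, err := cli.Txn(ctx).If(
		clientv3.Compare(clientv3.Version(key), ">", 0),
		clientv3.Compare(clientv3.Value(key), "=", prevValue),
	).Then(
		clientv3.OpDelete(key, clientv3.WithPrevKV()),
	).Commit()
	if err != nil {
		log.Fatal(err)
	}
	if !resp.Succeeded {
		log.Fatal("compare failed: key missing or value changed")
	}
	// WithPrevKV makes the deleted key-value available in the response.
	pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
	fmt.Printf("deleted %s (mod revision %d)\n", pkv.Key, pkv.ModRevision)
}
```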
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go
deleted file mode 100644
index 1c2680e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "strings"
-
- "github.com/coreos/etcd/clientv3"
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/store"
-)
-
-func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (store.Watcher, error) {
- ctx, cancel := context.WithCancel(s.ctx)
- wch := s.c.Watch(
- ctx,
- // TODO: very pricey; use a single store-wide watch in future
- s.pfx,
- clientv3.WithPrefix(),
- clientv3.WithRev(int64(sinceIndex)),
- clientv3.WithCreatedNotify(),
- clientv3.WithPrevKV())
- resp, ok := <-wch
- if err := resp.Err(); err != nil || !ok {
- cancel()
- return nil, etcdErr.NewError(etcdErr.EcodeRaftInternal, prefix, 0)
- }
-
- evc, donec := make(chan *store.Event), make(chan struct{})
- go func() {
- defer func() {
- close(evc)
- close(donec)
- }()
- for resp := range wch {
- for _, ev := range s.mkV2Events(resp) {
- k := ev.Node.Key
- if recursive {
- if !strings.HasPrefix(k, prefix) {
- continue
- }
- // accept events on hidden keys given in prefix
- k = strings.Replace(k, prefix, "/", 1)
- // ignore hidden keys deeper than prefix
- if strings.Contains(k, "/_") {
- continue
- }
- }
- if !recursive && k != prefix {
- continue
- }
- select {
- case evc <- ev:
- case <-ctx.Done():
- return
- }
- if !stream {
- return
- }
- }
- }
- }()
-
- return &v2v3Watcher{
- startRev: resp.Header.Revision,
- evc: evc,
- donec: donec,
- cancel: cancel,
- }, nil
-}
-
-func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*store.Event) {
- ak := s.mkActionKey()
- for _, rev := range mkRevs(wr) {
- var act, key *clientv3.Event
- for _, ev := range rev {
- if string(ev.Kv.Key) == ak {
- act = ev
- } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) {
- // use longest key to ignore intermediate new
- // directories from Create.
- key = ev
- } else if key == nil {
- key = ev
- }
- }
- v2ev := &store.Event{
- Action: string(act.Kv.Value),
- Node: s.mkV2Node(key.Kv),
- PrevNode: s.mkV2Node(key.PrevKv),
- EtcdIndex: mkV2Rev(wr.Header.Revision),
- }
- evs = append(evs, v2ev)
- }
- return evs
-}
-
-func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) {
- var curRev []*clientv3.Event
- for _, ev := range wr.Events {
- if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision {
- revs = append(revs, curRev)
- curRev = nil
- }
- curRev = append(curRev, ev)
- }
- if curRev != nil {
- revs = append(revs, curRev)
- }
- return revs
-}
-
-type v2v3Watcher struct {
- startRev int64
- evc chan *store.Event
- donec chan struct{}
- cancel context.CancelFunc
-}
-
-func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) }
-
-func (w *v2v3Watcher) Remove() {
- w.cancel()
- <-w.donec
-}
-
-func (w *v2v3Watcher) EventChan() chan *store.Event { return w.evc }
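The removed watcher drives its v2 event stream from a single clientv3 watch opened with WithPrefix, WithRev, WithCreatedNotify, and WithPrevKV. A short sketch of that option set used directly against a plain etcd v3 client is shown below; the endpoint, prefix, and starting revision are placeholders and the sketch is illustrative only.
```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Same option set the removed watcher uses: watch an entire prefix,
	// start from a known revision, request a created notification, and
	// include the previous key-value in every event.
	wch := cli.Watch(ctx, "/demo/",
		clientv3.WithPrefix(),
		clientv3.WithRev(1),
		clientv3.WithCreatedNotify(),
		clientv3.WithPrevKV(),
	)
	for resp := range wch {
		if err := resp.Err(); err != nil {
			log.Fatal(err)
		}
		for _, ev := range resp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```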
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go
deleted file mode 100644
index 310715f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v3client provides clientv3 interfaces from an etcdserver.
-//
-// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
-//
-// import (
-// "context"
-//
-// "github.com/coreos/etcd/embed"
-// "github.com/coreos/etcd/etcdserver/api/v3client"
-// )
-//
-// ...
-//
-// // create an embedded EtcdServer from the default configuration
-// cfg := embed.NewConfig()
-// cfg.Dir = "default.etcd"
-// e, err := embed.StartEtcd(cfg)
-// if err != nil {
-// // handle error!
-// }
-//
-// // wrap the EtcdServer with v3client
-// cli := v3client.New(e.Server)
-//
-// // use like an ordinary clientv3
-// resp, err := cli.Put(context.TODO(), "some-key", "it works!")
-// if err != nil {
-// // handle error!
-// }
-//
-package v3client
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go
deleted file mode 100644
index ab48ea7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3client
-
-import (
- "context"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- "github.com/coreos/etcd/proxy/grpcproxy/adapter"
-)
-
-// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
-// of making gRPC calls through sockets, the client makes direct function calls
-// to the etcd server through its api/v3rpc function interfaces.
-func New(s *etcdserver.EtcdServer) *clientv3.Client {
- c := clientv3.NewCtxClient(context.Background())
-
- kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
- c.KV = clientv3.NewKVFromKVClient(kvc, c)
-
- lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
- c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second)
-
- wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
- c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}
-
- mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
- c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)
-
- clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
- c.Cluster = clientv3.NewClusterFromClusterClient(clc, c)
-
- // TODO: implement clientv3.Auth interface?
-
- return c
-}
-
-// BlankContext implements Stringer on a context so the ctx string doesn't
-// depend on the context's WithValue data, which tends to be unsynchronized
-// (e.g., x/net/trace), causing ctx.String() to throw data races.
-type blankContext struct{ context.Context }
-
-func (*blankContext) String() string { return "(blankCtx)" }
-
-// watchWrapper wraps clientv3 watch calls to blank out the context
-// to avoid races on trace data.
-type watchWrapper struct{ clientv3.Watcher }
-
-func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
- return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go
deleted file mode 100644
index d6fefd7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v3election provides a v3 election service from an etcdserver.
-package v3election
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go
deleted file mode 100644
index c66d7a3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3election
-
-import (
- "context"
- "errors"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
-)
-
-// ErrMissingLeaderKey is returned when election API request
-// is missing the "leader" field.
-var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
-
-type electionServer struct {
- c *clientv3.Client
-}
-
-func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
- return &electionServer{c}
-}
-
-func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
- s, err := es.session(ctx, req.Lease)
- if err != nil {
- return nil, err
- }
- e := concurrency.NewElection(s, string(req.Name))
- if err = e.Campaign(ctx, string(req.Value)); err != nil {
- return nil, err
- }
- return &epb.CampaignResponse{
- Header: e.Header(),
- Leader: &epb.LeaderKey{
- Name: req.Name,
- Key: []byte(e.Key()),
- Rev: e.Rev(),
- Lease: int64(s.Lease()),
- },
- }, nil
-}
-
-func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
- if req.Leader == nil {
- return nil, ErrMissingLeaderKey
- }
- s, err := es.session(ctx, req.Leader.Lease)
- if err != nil {
- return nil, err
- }
- e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
- if err := e.Proclaim(ctx, string(req.Value)); err != nil {
- return nil, err
- }
- return &epb.ProclaimResponse{Header: e.Header()}, nil
-}
-
-func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
- s, err := es.session(stream.Context(), -1)
- if err != nil {
- return err
- }
- e := concurrency.NewElection(s, string(req.Name))
- ch := e.Observe(stream.Context())
- for stream.Context().Err() == nil {
- select {
- case <-stream.Context().Done():
- case resp, ok := <-ch:
- if !ok {
- return nil
- }
- lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
- if err := stream.Send(lresp); err != nil {
- return err
- }
- }
- }
- return stream.Context().Err()
-}
-
-func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
- s, err := es.session(ctx, -1)
- if err != nil {
- return nil, err
- }
- l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
- if lerr != nil {
- return nil, lerr
- }
- return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
-}
-
-func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
- if req.Leader == nil {
- return nil, ErrMissingLeaderKey
- }
- s, err := es.session(ctx, req.Leader.Lease)
- if err != nil {
- return nil, err
- }
- e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
- if err := e.Resign(ctx); err != nil {
- return nil, err
- }
- return &epb.ResignResponse{Header: e.Header()}, nil
-}
-
-func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
- s, err := concurrency.NewSession(
- es.c,
- concurrency.WithLease(clientv3.LeaseID(lease)),
- concurrency.WithContext(ctx),
- )
- if err != nil {
- return nil, err
- }
- s.Orphan()
- return s, nil
-}
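The election server removed above is a thin wrapper over clientv3/concurrency: Campaign, Proclaim, Leader, Observe, and Resign all delegate to concurrency.NewElection on a lease-backed session. A minimal client-side sketch of the same flow, assuming a reachable etcd at `localhost:2379` and using the placeholder election prefix `/demo-election`:
```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A session ties the candidacy to a lease, much as the removed server
	// does with concurrency.WithLease.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/demo-election")
	ctx := context.Background()

	// Campaign blocks until this candidate becomes the leader.
	if err := e.Campaign(ctx, "candidate-1"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("elected; leadership key:", e.Key())

	// Resign releases leadership so the next campaigner can win.
	if err := e.Resign(ctx); err != nil {
		log.Fatal(err)
	}
}
```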
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
deleted file mode 100644
index 58368bb..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: etcdserver/api/v3election/v3electionpb/v3election.proto
-
-/*
-Package v3electionpb is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package gw
-
-import (
- "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.CampaignRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ProclaimRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- stream, err := client.Observe(ctx, &protoReq)
- if err != nil {
- return nil, metadata, err
- }
- header, err := stream.Header()
- if err != nil {
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-
-}
-
-func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ResignRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterElectionHandler(ctx, mux, conn)
-}
-
-// RegisterElectionHandler registers the http handlers for service Election to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
-}
-
-// RegisterElectionHandler registers the http handlers for service Election to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ElectionClient" to call the correct interceptors.
-func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
-
- mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "campaign"}, ""))
-
- pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "proclaim"}, ""))
-
- pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "leader"}, ""))
-
- pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "observe"}, ""))
-
- pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "resign"}, ""))
-)
-
-var (
- forward_Election_Campaign_0 = runtime.ForwardResponseMessage
-
- forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
-
- forward_Election_Leader_0 = runtime.ForwardResponseMessage
-
- forward_Election_Observe_0 = runtime.ForwardResponseStream
-
- forward_Election_Resign_0 = runtime.ForwardResponseMessage
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
deleted file mode 100644
index cb475b8..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
+++ /dev/null
@@ -1,119 +0,0 @@
-syntax = "proto3";
-package v3electionpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/etcdserver/etcdserverpb/rpc.proto";
-import "etcd/mvcc/mvccpb/kv.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-// The election service exposes client-side election facilities as a gRPC interface.
-service Election {
- // Campaign waits to acquire leadership in an election, returning a LeaderKey
- // representing the leadership if successful. The LeaderKey can then be used
- // to issue new values on the election, transactionally guard API requests on
- // leadership still being held, and resign from the election.
- rpc Campaign(CampaignRequest) returns (CampaignResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/campaign"
- body: "*"
- };
- }
- // Proclaim updates the leader's posted value with a new value.
- rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/proclaim"
- body: "*"
- };
- }
- // Leader returns the current election proclamation, if any.
- rpc Leader(LeaderRequest) returns (LeaderResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/leader"
- body: "*"
- };
- }
- // Observe streams election proclamations in-order as made by the election's
- // elected leaders.
- rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/observe"
- body: "*"
- };
- }
- // Resign releases election leadership so other campaigners may acquire
- // leadership on the election.
- rpc Resign(ResignRequest) returns (ResignResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/resign"
- body: "*"
- };
- }
-}
-
-message CampaignRequest {
- // name is the election's identifier for the campaign.
- bytes name = 1;
- // lease is the ID of the lease attached to leadership of the election. If the
- // lease expires or is revoked before resigning leadership, then the
- // leadership is transferred to the next campaigner, if any.
- int64 lease = 2;
- // value is the initial proclaimed value set when the campaigner wins the
- // election.
- bytes value = 3;
-}
-
-message CampaignResponse {
- etcdserverpb.ResponseHeader header = 1;
- // leader describes the resources used for holding leadereship of the election.
- LeaderKey leader = 2;
-}
-
-message LeaderKey {
- // name is the election identifier that correponds to the leadership key.
- bytes name = 1;
- // key is an opaque key representing the ownership of the election. If the key
- // is deleted, then leadership is lost.
- bytes key = 2;
- // rev is the creation revision of the key. It can be used to test for ownership
- // of an election during transactions by testing the key's creation revision
- // matches rev.
- int64 rev = 3;
- // lease is the lease ID of the election leader.
- int64 lease = 4;
-}
-
-message LeaderRequest {
- // name is the election identifier for the leadership information.
- bytes name = 1;
-}
-
-message LeaderResponse {
- etcdserverpb.ResponseHeader header = 1;
- // kv is the key-value pair representing the latest leader update.
- mvccpb.KeyValue kv = 2;
-}
-
-message ResignRequest {
- // leader is the leadership to relinquish by resignation.
- LeaderKey leader = 1;
-}
-
-message ResignResponse {
- etcdserverpb.ResponseHeader header = 1;
-}
-
-message ProclaimRequest {
- // leader is the leadership hold on the election.
- LeaderKey leader = 1;
- // value is an update meant to overwrite the leader's current value.
- bytes value = 2;
-}
-
-message ProclaimResponse {
- etcdserverpb.ResponseHeader header = 1;
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go
deleted file mode 100644
index e0a1008..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v3lock provides a v3 locking service from an etcdserver.
-package v3lock
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
deleted file mode 100644
index a5efcba..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3lock
-
-import (
- "context"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
-)
-
-type lockServer struct {
- c *clientv3.Client
-}
-
-func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
- return &lockServer{c}
-}
-
-func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
- s, err := concurrency.NewSession(
- ls.c,
- concurrency.WithLease(clientv3.LeaseID(req.Lease)),
- concurrency.WithContext(ctx),
- )
- if err != nil {
- return nil, err
- }
- s.Orphan()
- m := concurrency.NewMutex(s, string(req.Name))
- if err = m.Lock(ctx); err != nil {
- return nil, err
- }
- return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
-}
-
-func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
- resp, err := ls.c.Delete(ctx, string(req.Key))
- if err != nil {
- return nil, err
- }
- return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
-}
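The lock server removed above is likewise a small wrapper around clientv3/concurrency: Lock creates a lease-backed session and acquires a concurrency.Mutex, and Unlock simply deletes the returned ownership key. A minimal client-side sketch of the same pattern, assuming a reachable etcd at `localhost:2379` and the placeholder lock prefix `/demo-lock`:
```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The removed server creates a session per RPC; a client normally keeps
	// one session (lease) alive for as long as it may hold the lock.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/demo-lock")
	ctx := context.Background()

	if err := m.Lock(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println("holding lock, ownership key:", m.Key())

	// Unlock deletes the ownership key, waking the next waiter.
	if err := m.Unlock(ctx); err != nil {
		log.Fatal(err)
	}
}
```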
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
deleted file mode 100644
index efecc45..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto
-
-/*
-Package v3lockpb is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package gw
-
-import (
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.LockRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.UnlockRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterLockHandler(ctx, mux, conn)
-}
-
-// RegisterLockHandler registers the http handlers for service Lock to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
-}
-
-// RegisterLockHandler registers the http handlers for service Lock to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LockClient" to call the correct interceptors.
-func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
-
- mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3beta", "lock"}, ""))
-
- pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lock", "unlock"}, ""))
-)
-
-var (
- forward_Lock_Lock_0 = runtime.ForwardResponseMessage
-
- forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
deleted file mode 100644
index 44b698d..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
+++ /dev/null
@@ -1,65 +0,0 @@
-syntax = "proto3";
-package v3lockpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/etcdserver/etcdserverpb/rpc.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-// The lock service exposes client-side locking facilities as a gRPC interface.
-service Lock {
- // Lock acquires a distributed shared lock on a given named lock.
- // On success, it will return a unique key that exists so long as the
- // lock is held by the caller. This key can be used in conjunction with
- // transactions to safely ensure updates to etcd only occur while holding
- // lock ownership. The lock is held until Unlock is called on the key or the
- // lease associate with the owner expires.
- rpc Lock(LockRequest) returns (LockResponse) {
- option (google.api.http) = {
- post: "/v3beta/lock/lock"
- body: "*"
- };
- }
-
- // Unlock takes a key returned by Lock and releases the hold on lock. The
- // next Lock caller waiting for the lock will then be woken up and given
- // ownership of the lock.
- rpc Unlock(UnlockRequest) returns (UnlockResponse) {
- option (google.api.http) = {
- post: "/v3beta/lock/unlock"
- body: "*"
- };
- }
-}
-
-message LockRequest {
- // name is the identifier for the distributed shared lock to be acquired.
- bytes name = 1;
- // lease is the ID of the lease that will be attached to ownership of the
- // lock. If the lease expires or is revoked and currently holds the lock,
- // the lock is automatically released. Calls to Lock with the same lease will
- // be treated as a single acquistion; locking twice with the same lease is a
- // no-op.
- int64 lease = 2;
-}
-
-message LockResponse {
- etcdserverpb.ResponseHeader header = 1;
- // key is a key that will exist on etcd for the duration that the Lock caller
- // owns the lock. Users should not modify this key or the lock may exhibit
- // undefined behavior.
- bytes key = 2;
-}
-
-message UnlockRequest {
- // key is the lock ownership key granted by Lock.
- bytes key = 1;
-}
-
-message UnlockResponse {
- etcdserverpb.ResponseHeader header = 1;
-}
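The proto above also exposes the lock service over the gRPC gateway at POST /v3beta/lock/lock and /v3beta/lock/unlock. As a rough sketch only: assuming an etcd serving the gateway on `localhost:2379`, and relying on the gateway's convention of encoding protobuf bytes fields as base64 strings in JSON, a lock request could be issued like this (the lock name `my-lock` is a placeholder).
```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// The gateway marshals protobuf bytes fields (such as "name") as base64.
	name := base64.StdEncoding.EncodeToString([]byte("my-lock"))
	body := bytes.NewBufferString(fmt.Sprintf(`{"name":%q}`, name))

	resp, err := http.Post("http://localhost:2379/v3beta/lock/lock", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// The JSON response carries the base64-encoded ownership key, which is
	// what a later unlock request must send back.
	fmt.Println(string(out))
}
```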
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
deleted file mode 100644
index ca8e53a..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type AuthServer struct {
- authenticator etcdserver.Authenticator
-}
-
-func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
- return &AuthServer{authenticator: s}
-}
-
-func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
- resp, err := as.authenticator.AuthEnable(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
- resp, err := as.authenticator.AuthDisable(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
- resp, err := as.authenticator.Authenticate(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := as.authenticator.RoleAdd(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := as.authenticator.RoleDelete(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := as.authenticator.RoleGet(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := as.authenticator.RoleList(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := as.authenticator.RoleRevokePermission(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := as.authenticator.RoleGrantPermission(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := as.authenticator.UserAdd(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := as.authenticator.UserDelete(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := as.authenticator.UserGet(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := as.authenticator.UserList(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := as.authenticator.UserGrantRole(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := as.authenticator.UserRevokeRole(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := as.authenticator.UserChangePassword(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
deleted file mode 100644
index 17a2c87..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import "github.com/gogo/protobuf/proto"
-
-type codec struct{}
-
-func (c *codec) Marshal(v interface{}) ([]byte, error) {
- b, err := proto.Marshal(v.(proto.Message))
- sentBytes.Add(float64(len(b)))
- return b, err
-}
-
-func (c *codec) Unmarshal(data []byte, v interface{}) error {
- receivedBytes.Add(float64(len(data)))
- return proto.Unmarshal(data, v.(proto.Message))
-}
-
-func (c *codec) String() string {
- return "proto"
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
deleted file mode 100644
index c97e746..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "crypto/tls"
- "math"
-
- "github.com/coreos/etcd/etcdserver"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "github.com/grpc-ecosystem/go-grpc-middleware"
- "github.com/grpc-ecosystem/go-grpc-prometheus"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/health"
- healthpb "google.golang.org/grpc/health/grpc_health_v1"
-)
-
-const (
- grpcOverheadBytes = 512 * 1024
- maxStreams = math.MaxUint32
- maxSendBytes = math.MaxInt32
-)
-
-func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
- var opts []grpc.ServerOption
- opts = append(opts, grpc.CustomCodec(&codec{}))
- if tls != nil {
- opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
- }
- opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
- newLogUnaryInterceptor(s),
- newUnaryInterceptor(s),
- grpc_prometheus.UnaryServerInterceptor,
- )))
- opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
- newStreamInterceptor(s),
- grpc_prometheus.StreamServerInterceptor,
- )))
- opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
- opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
- opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
- grpcServer := grpc.NewServer(append(opts, gopts...)...)
-
- pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
- pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
- pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
- pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
- pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
- pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
-
- // server should register all the services manually
- // use empty service name for all etcd services' health status,
- // see https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more
- hsrv := health.NewServer()
- hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
- healthpb.RegisterHealthServer(grpcServer, hsrv)
-
- // set zero values for metrics registered for this grpc server
- grpc_prometheus.Register(grpcServer)
-
- return grpcServer
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
deleted file mode 100644
index 75da52f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "github.com/coreos/etcd/etcdserver"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type header struct {
- clusterID int64
- memberID int64
- raftTimer etcdserver.RaftTimer
- rev func() int64
-}
-
-func newHeader(s *etcdserver.EtcdServer) header {
- return header{
- clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.ID()),
- raftTimer: s,
- rev: func() int64 { return s.KV().Rev() },
- }
-}
-
-// fill populates pb.ResponseHeader using etcdserver information
-func (h *header) fill(rh *pb.ResponseHeader) {
- if rh == nil {
- plog.Panic("unexpected nil resp.Header")
- }
- rh.ClusterId = uint64(h.clusterID)
- rh.MemberId = uint64(h.memberID)
- rh.RaftTerm = h.raftTimer.Term()
- if rh.Revision == 0 {
- rh.Revision = h.rev()
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
deleted file mode 100644
index d594ae7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-const (
- maxNoLeaderCnt = 3
-)
-
-type streamsMap struct {
- mu sync.Mutex
- streams map[grpc.ServerStream]struct{}
-}
-
-func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- if !api.IsCapabilityEnabled(api.V3rpcCapability) {
- return nil, rpctypes.ErrGRPCNotCapable
- }
-
- md, ok := metadata.FromIncomingContext(ctx)
- if ok {
- if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
- if s.Leader() == types.ID(raft.None) {
- return nil, rpctypes.ErrGRPCNoLeader
- }
- }
- }
-
- return handler(ctx, req)
- }
-}
-
-func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- startTime := time.Now()
- resp, err := handler(ctx, req)
- defer logUnaryRequestStats(ctx, nil, info, startTime, req, resp)
- return resp, err
- }
-}
-
-func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) {
- duration := time.Since(startTime)
- remote := "No remote client info."
- peerInfo, ok := peer.FromContext(ctx)
- if ok {
- remote = peerInfo.Addr.String()
- }
- var responseType string = info.FullMethod
- var reqCount, respCount int64
- var reqSize, respSize int
- var reqContent string
- switch _resp := resp.(type) {
- case *pb.RangeResponse:
- _req, ok := req.(*pb.RangeRequest)
- if ok {
- reqCount = 0
- reqSize = _req.Size()
- reqContent = _req.String()
- }
- if _resp != nil {
- respCount = _resp.GetCount()
- respSize = _resp.Size()
- }
- case *pb.PutResponse:
- _req, ok := req.(*pb.PutRequest)
- if ok {
- reqCount = 1
- reqSize = _req.Size()
- reqContent = pb.NewLoggablePutRequest(_req).String()
- // redact value field from request content, see PR #9821
- }
- if _resp != nil {
- respCount = 0
- respSize = _resp.Size()
- }
- case *pb.DeleteRangeResponse:
- _req, ok := req.(*pb.DeleteRangeRequest)
- if ok {
- reqCount = 0
- reqSize = _req.Size()
- reqContent = _req.String()
- }
- if _resp != nil {
- respCount = _resp.GetDeleted()
- respSize = _resp.Size()
- }
- case *pb.TxnResponse:
- _req, ok := req.(*pb.TxnRequest)
- if ok && _resp != nil {
- if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure
- reqCount = int64(len(_req.GetSuccess()))
- reqSize = 0
- for _, r := range _req.GetSuccess() {
- reqSize += r.Size()
- }
- } else {
- reqCount = int64(len(_req.GetFailure()))
- reqSize = 0
- for _, r := range _req.GetFailure() {
- reqSize += r.Size()
- }
- }
- reqContent = pb.NewLoggableTxnRequest(_req).String()
- // redact value field from request content, see PR #9821
- }
- if _resp != nil {
- respCount = 0
- respSize = _resp.Size()
- }
- default:
- reqCount = -1
- reqSize = -1
- respCount = -1
- respSize = -1
- }
-
- logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
-}
-
-func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
- reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
- if lg == nil {
- plog.Debugf("start time = %v, "+
- "time spent = %v, "+
- "remote = %s, "+
- "response type = %s, "+
- "request count = %d, "+
- "request size = %d, "+
- "response count = %d, "+
- "response size = %d, "+
- "request content = %s",
- startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent,
- )
- } else {
- lg.Debug("request stats",
- zap.Time("start time", startTime),
- zap.Duration("time spent", duration),
- zap.String("remote", remote),
- zap.String("response type", responseType),
- zap.Int64("request count", reqCount),
- zap.Int("request size", reqSize),
- zap.Int64("response count", respCount),
- zap.Int("response size", respSize),
- zap.String("request content", reqContent),
- )
- }
-}
-
-func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
- smap := monitorLeader(s)
-
- return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- if !api.IsCapabilityEnabled(api.V3rpcCapability) {
- return rpctypes.ErrGRPCNotCapable
- }
-
- md, ok := metadata.FromIncomingContext(ss.Context())
- if ok {
- if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
- if s.Leader() == types.ID(raft.None) {
- return rpctypes.ErrGRPCNoLeader
- }
-
- cctx, cancel := context.WithCancel(ss.Context())
- ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
-
- smap.mu.Lock()
- smap.streams[ss] = struct{}{}
- smap.mu.Unlock()
-
- defer func() {
- smap.mu.Lock()
- delete(smap.streams, ss)
- smap.mu.Unlock()
- cancel()
- }()
-
- }
- }
-
- return handler(srv, ss)
- }
-}
-
-type serverStreamWithCtx struct {
- grpc.ServerStream
- ctx context.Context
- cancel *context.CancelFunc
-}
-
-func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
-
-func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
- smap := &streamsMap{
- streams: make(map[grpc.ServerStream]struct{}),
- }
-
- go func() {
- election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
- noLeaderCnt := 0
-
- for {
- select {
- case <-s.StopNotify():
- return
- case <-time.After(election):
- if s.Leader() == types.ID(raft.None) {
- noLeaderCnt++
- } else {
- noLeaderCnt = 0
- }
-
-				// We are more conservative about canceling existing streams. Reconnecting streams
-				// costs much more than just rejecting new requests, so we wait until the member
-				// cannot find a leader for maxNoLeaderCnt election timeouts before canceling existing streams.
- if noLeaderCnt >= maxNoLeaderCnt {
- smap.mu.Lock()
- for ss := range smap.streams {
- if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
- (*ssWithCtx.cancel)()
- <-ss.Context().Done()
- }
- }
- smap.streams = make(map[grpc.ServerStream]struct{})
- smap.mu.Unlock()
- }
- }
- }
- }()
-
- return smap
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
deleted file mode 100644
index 9781bdd..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v3rpc implements etcd v3 RPC system based on gRPC.
-package v3rpc
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/adt"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")
-)
-
-type kvServer struct {
- hdr header
- kv etcdserver.RaftKV
-	// maxTxnOps is the maximum number of operations per txn.
-	// e.g. suppose maxTxnOps = 128:
- // Txn.Success can have at most 128 operations,
- // and Txn.Failure can have at most 128 operations.
- maxTxnOps uint
-}
-
-func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
- return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps}
-}
-
-func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- if err := checkRangeRequest(r); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.Range(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- if err := checkPutRequest(r); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.Put(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- if err := checkDeleteRequest(r); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.DeleteRange(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil {
- return nil, err
- }
- // check for forbidden put/del overlaps after checking request to avoid quadratic blowup
- if _, _, err := checkIntervals(r.Success); err != nil {
- return nil, err
- }
- if _, _, err := checkIntervals(r.Failure); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.Txn(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
- resp, err := s.kv.Compact(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func checkRangeRequest(r *pb.RangeRequest) error {
- if len(r.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- return nil
-}
-
-func checkPutRequest(r *pb.PutRequest) error {
- if len(r.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- if r.IgnoreValue && len(r.Value) != 0 {
- return rpctypes.ErrGRPCValueProvided
- }
- if r.IgnoreLease && r.Lease != 0 {
- return rpctypes.ErrGRPCLeaseProvided
- }
- return nil
-}
-
-func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
- if len(r.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- return nil
-}
-
-func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
- opc := len(r.Compare)
- if opc < len(r.Success) {
- opc = len(r.Success)
- }
- if opc < len(r.Failure) {
- opc = len(r.Failure)
- }
- if opc > maxTxnOps {
- return rpctypes.ErrGRPCTooManyOps
- }
-
- for _, c := range r.Compare {
- if len(c.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- }
- for _, u := range r.Success {
- if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
- return err
- }
- }
- for _, u := range r.Failure {
- if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// checkIntervals tests whether puts and deletes overlap for a list of ops. If
-// there is an overlap, it returns an error. Otherwise, it returns the put and
-// delete sets for recursive evaluation.
-func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
- var dels adt.IntervalTree
-
- // collect deletes from this level; build first to check lower level overlapped puts
- for _, req := range reqs {
- tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange)
- if !ok {
- continue
- }
- dreq := tv.RequestDeleteRange
- if dreq == nil {
- continue
- }
- var iv adt.Interval
- if len(dreq.RangeEnd) != 0 {
- iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd))
- } else {
- iv = adt.NewStringAffinePoint(string(dreq.Key))
- }
- dels.Insert(iv, struct{}{})
- }
-
- // collect children puts/deletes
- puts := make(map[string]struct{})
- for _, req := range reqs {
- tv, ok := req.Request.(*pb.RequestOp_RequestTxn)
- if !ok {
- continue
- }
- putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success)
- if err != nil {
- return nil, dels, err
- }
- putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure)
- if err != nil {
- return nil, dels, err
- }
- for k := range putsThen {
- if _, ok := puts[k]; ok {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- if dels.Intersects(adt.NewStringAffinePoint(k)) {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- puts[k] = struct{}{}
- }
- for k := range putsElse {
- if _, ok := puts[k]; ok {
-			// if the key is from putsThen, the overlap is OK since
-			// the then/else branches are mutually exclusive
- if _, isSafe := putsThen[k]; !isSafe {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- }
- if dels.Intersects(adt.NewStringAffinePoint(k)) {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- puts[k] = struct{}{}
- }
- dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
- dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
- }
-
- // collect and check this level's puts
- for _, req := range reqs {
- tv, ok := req.Request.(*pb.RequestOp_RequestPut)
- if !ok || tv.RequestPut == nil {
- continue
- }
- k := string(tv.RequestPut.Key)
- if _, ok := puts[k]; ok {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- if dels.Intersects(adt.NewStringAffinePoint(k)) {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- puts[k] = struct{}{}
- }
- return puts, dels, nil
-}
-
-func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error {
- // TODO: ensure only one of the field is set.
- switch uv := u.Request.(type) {
- case *pb.RequestOp_RequestRange:
- return checkRangeRequest(uv.RequestRange)
- case *pb.RequestOp_RequestPut:
- return checkPutRequest(uv.RequestPut)
- case *pb.RequestOp_RequestDeleteRange:
- return checkDeleteRequest(uv.RequestDeleteRange)
- case *pb.RequestOp_RequestTxn:
- return checkTxnRequest(uv.RequestTxn, maxTxnOps)
- default:
- // empty op / nil entry
- return rpctypes.ErrGRPCKeyNotFound
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
deleted file mode 100644
index 5b4f2b1..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "io"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
-)
-
-type LeaseServer struct {
- hdr header
- le etcdserver.Lessor
-}
-
-func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
- return &LeaseServer{le: s, hdr: newHeader(s)}
-}
-
-func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- resp, err := ls.le.LeaseGrant(ctx, cr)
-
- if err != nil {
- return nil, togRPCError(err)
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- resp, err := ls.le.LeaseRevoke(ctx, rr)
- if err != nil {
- return nil, togRPCError(err)
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
- resp, err := ls.le.LeaseTimeToLive(ctx, rr)
- if err != nil && err != lease.ErrLeaseNotFound {
- return nil, togRPCError(err)
- }
- if err == lease.ErrLeaseNotFound {
- resp = &pb.LeaseTimeToLiveResponse{
- Header: &pb.ResponseHeader{},
- ID: rr.ID,
- TTL: -1,
- }
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
- resp, err := ls.le.LeaseLeases(ctx, rr)
- if err != nil && err != lease.ErrLeaseNotFound {
- return nil, togRPCError(err)
- }
- if err == lease.ErrLeaseNotFound {
- resp = &pb.LeaseLeasesResponse{
- Header: &pb.ResponseHeader{},
- Leases: []*pb.LeaseStatus{},
- }
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
- errc := make(chan error, 1)
- go func() {
- errc <- ls.leaseKeepAlive(stream)
- }()
- select {
- case err = <-errc:
- case <-stream.Context().Done():
- // the only server-side cancellation is noleader for now.
- err = stream.Context().Err()
- if err == context.Canceled {
- err = rpctypes.ErrGRPCNoLeader
- }
- }
- return err
-}
-
-func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
- for {
- req, err := stream.Recv()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- if isClientCtxErr(stream.Context().Err(), err) {
- plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
- }
- return err
- }
-
-		// Create the header before we send out the renew request.
-		// This makes sure that the revision is strictly smaller than or equal to
-		// the revision at which the keepalive happened at the local server (when the
-		// local server is the leader) or at the remote leader.
-		// Without this, a lease might be revoked at rev 3 but the client could see the
-		// keepalive succeed at rev 4.
- resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
- ls.hdr.fill(resp.Header)
-
- ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
- if err == lease.ErrLeaseNotFound {
- err = nil
- ttl = 0
- }
-
- if err != nil {
- return togRPCError(err)
- }
-
- resp.TTL = ttl
- err = stream.Send(resp)
- if err != nil {
- if isClientCtxErr(stream.Context().Err(), err) {
- plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
- }
- return err
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
deleted file mode 100644
index c9df180..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "crypto/sha256"
- "io"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/version"
-)
-
-type KVGetter interface {
- KV() mvcc.ConsistentWatchableKV
-}
-
-type BackendGetter interface {
- Backend() backend.Backend
-}
-
-type Alarmer interface {
- Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
-}
-
-type LeaderTransferrer interface {
- MoveLeader(ctx context.Context, lead, target uint64) error
-}
-
-type RaftStatusGetter interface {
- etcdserver.RaftTimer
- ID() types.ID
- Leader() types.ID
-}
-
-type AuthGetter interface {
- AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
- AuthStore() auth.AuthStore
-}
-
-type maintenanceServer struct {
- rg RaftStatusGetter
- kg KVGetter
- bg BackendGetter
- a Alarmer
- lt LeaderTransferrer
- hdr header
-}
-
-func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
- srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)}
- return &authMaintenanceServer{srv, s}
-}
-
-func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
- plog.Noticef("starting to defragment the storage backend...")
- err := ms.bg.Backend().Defrag()
- if err != nil {
- plog.Errorf("failed to defragment the storage backend (%v)", err)
- return nil, err
- }
- plog.Noticef("finished defragmenting the storage backend")
- return &pb.DefragmentResponse{}, nil
-}
-
-func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
- snap := ms.bg.Backend().Snapshot()
- pr, pw := io.Pipe()
-
- defer pr.Close()
-
- go func() {
- snap.WriteTo(pw)
- if err := snap.Close(); err != nil {
- plog.Errorf("error closing snapshot (%v)", err)
- }
- pw.Close()
- }()
-
- // send file data
- h := sha256.New()
- br := int64(0)
- buf := make([]byte, 32*1024)
- sz := snap.Size()
- for br < sz {
- n, err := io.ReadFull(pr, buf)
- if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
- return togRPCError(err)
- }
- br += int64(n)
- resp := &pb.SnapshotResponse{
- RemainingBytes: uint64(sz - br),
- Blob: buf[:n],
- }
- if err = srv.Send(resp); err != nil {
- return togRPCError(err)
- }
- h.Write(buf[:n])
- }
-
- // send sha
- sha := h.Sum(nil)
- hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
- if err := srv.Send(hresp); err != nil {
- return togRPCError(err)
- }
-
- return nil
-}
-
-func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- h, rev, err := ms.kg.KV().Hash()
- if err != nil {
- return nil, togRPCError(err)
- }
- resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
- ms.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
- ms.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- return ms.a.Alarm(ctx, ar)
-}
-
-func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
- resp := &pb.StatusResponse{
- Header: &pb.ResponseHeader{Revision: ms.hdr.rev()},
- Version: version.Version,
- DbSize: ms.bg.Backend().Size(),
- Leader: uint64(ms.rg.Leader()),
- RaftIndex: ms.rg.Index(),
- RaftTerm: ms.rg.Term(),
- }
- ms.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
- if ms.rg.ID() != ms.rg.Leader() {
- return nil, rpctypes.ErrGRPCNotLeader
- }
-
- if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
- return nil, togRPCError(err)
- }
- return &pb.MoveLeaderResponse{}, nil
-}
-
-type authMaintenanceServer struct {
- *maintenanceServer
- ag AuthGetter
-}
-
-func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
- authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
-
- return ams.ag.AuthStore().IsAdminPermitted(authInfo)
-}
-
-func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
- }
-
- return ams.maintenanceServer.Defragment(ctx, sr)
-}
-
-func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
- if err := ams.isAuthenticated(srv.Context()); err != nil {
- return err
- }
-
- return ams.maintenanceServer.Snapshot(sr, srv)
-}
-
-func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
- }
-
- return ams.maintenanceServer.Hash(ctx, r)
-}
-
-func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
- }
- return ams.maintenanceServer.HashKV(ctx, r)
-}
-
-func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
- return ams.maintenanceServer.Status(ctx, ar)
-}
-
-func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
- return ams.maintenanceServer.MoveLeader(ctx, tr)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
deleted file mode 100644
index cbe7b47..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "time"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-)
-
-type ClusterServer struct {
- cluster api.Cluster
- server etcdserver.ServerV3
-}
-
-func NewClusterServer(s etcdserver.ServerV3) *ClusterServer {
- return &ClusterServer{
- cluster: s.Cluster(),
- server: s,
- }
-}
-
-func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
- urls, err := types.NewURLs(r.PeerURLs)
- if err != nil {
- return nil, rpctypes.ErrGRPCMemberBadURLs
- }
-
- now := time.Now()
- m := membership.NewMember("", urls, "", &now)
- membs, merr := cs.server.AddMember(ctx, *m)
- if merr != nil {
- return nil, togRPCError(merr)
- }
-
- return &pb.MemberAddResponse{
- Header: cs.header(),
- Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
- Members: membersToProtoMembers(membs),
- }, nil
-}
-
-func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
- membs, err := cs.server.RemoveMember(ctx, r.ID)
- if err != nil {
- return nil, togRPCError(err)
- }
- return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
-}
-
-func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
- m := membership.Member{
- ID: types.ID(r.ID),
- RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
- }
- membs, err := cs.server.UpdateMember(ctx, m)
- if err != nil {
- return nil, togRPCError(err)
- }
- return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
-}
-
-func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
- membs := membersToProtoMembers(cs.cluster.Members())
- return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
-}
-
-func (cs *ClusterServer) header() *pb.ResponseHeader {
- return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()}
-}
-
-func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
- protoMembs := make([]*pb.Member, len(membs))
- for i := range membs {
- protoMembs[i] = &pb.Member{
- Name: membs[i].Name,
- ID: uint64(membs[i].ID),
- PeerURLs: membs[i].PeerURLs,
- ClientURLs: membs[i].ClientURLs,
- }
- }
- return protoMembs
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
deleted file mode 100644
index 6cb41a6..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "client_grpc_sent_bytes_total",
- Help: "The total number of bytes sent to grpc clients.",
- })
-
- receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "client_grpc_received_bytes_total",
- Help: "The total number of bytes received from grpc clients.",
- })
-)
-
-func init() {
- prometheus.MustRegister(sentBytes)
- prometheus.MustRegister(receivedBytes)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
deleted file mode 100644
index 02d9960..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
-)
-
-type quotaKVServer struct {
- pb.KVServer
- qa quotaAlarmer
-}
-
-type quotaAlarmer struct {
- q etcdserver.Quota
- a Alarmer
- id types.ID
-}
-
-// check whether the request satisfies the quota. If there is not enough space,
-// ignore the request and raise the free-space alarm.
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
- if qa.q.Available(r) {
- return nil
- }
- req := &pb.AlarmRequest{
- MemberID: uint64(qa.id),
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_NOSPACE,
- }
- qa.a.Alarm(ctx, req)
- return rpctypes.ErrGRPCNoSpace
-}
-
-func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
-	return &quotaKVServer{
- NewKVServer(s),
- quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
- }
-}
-
-func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- if err := s.qa.check(ctx, r); err != nil {
- return nil, err
- }
- return s.KVServer.Put(ctx, r)
-}
-
-func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if err := s.qa.check(ctx, r); err != nil {
- return nil, err
- }
- return s.KVServer.Txn(ctx, r)
-}
-
-type quotaLeaseServer struct {
- pb.LeaseServer
- qa quotaAlarmer
-}
-
-func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- if err := s.qa.check(ctx, cr); err != nil {
- return nil, err
- }
- return s.LeaseServer.LeaseGrant(ctx, cr)
-}
-
-func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
-	return &quotaLeaseServer{
- NewLeaseServer(s),
- quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
deleted file mode 100644
index 799c119..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "strings"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var toGRPCErrorMap = map[error]error{
- membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
- membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
- membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
- membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
- etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
-
- mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
- mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
- etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
- etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
- etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
-
- etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
- etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
- etcdserver.ErrStopped: rpctypes.ErrGRPCStopped,
- etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
- etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
- etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
- etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
- etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
- etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
-
- lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
- lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist,
- lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge,
-
- auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist,
- auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist,
- auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist,
- auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty,
- auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound,
- auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist,
- auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
- auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
- auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
- auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
- auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
- auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
- auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
- auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
-}
-
-func togRPCError(err error) error {
- // let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
- if err == context.Canceled || err == context.DeadlineExceeded {
- return err
- }
- grpcErr, ok := toGRPCErrorMap[err]
- if !ok {
- return status.Error(codes.Unknown, err.Error())
- }
- return grpcErr
-}
-
-func isClientCtxErr(ctxErr error, err error) bool {
- if ctxErr != nil {
- return true
- }
-
- ev, ok := status.FromError(err)
- if !ok {
- return false
- }
-
- switch ev.Code() {
- case codes.Canceled, codes.DeadlineExceeded:
- // client-side context cancel or deadline exceeded
- // "rpc error: code = Canceled desc = context canceled"
- // "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
- return true
- case codes.Unavailable:
- msg := ev.Message()
- // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
- // "rpc error: code = Unavailable desc = client disconnected"
- if msg == "client disconnected" {
- return true
- }
- // "grpc/transport.ClientTransport.CloseStream" on canceled streams
- // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
- if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
deleted file mode 100644
index dd4f329..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "io"
- "sync"
- "time"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-type watchServer struct {
- clusterID int64
- memberID int64
- raftTimer etcdserver.RaftTimer
- watchable mvcc.WatchableKV
-
- ag AuthGetter
-}
-
-func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
- return &watchServer{
- clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.ID()),
- raftTimer: s,
- watchable: s.Watchable(),
- ag: s,
- }
-}
-
-var (
-	// External tests can read this with GetProgressReportInterval()
-	// and change it to a small value to finish fast with
-	// SetProgressReportInterval().
- progressReportInterval = 10 * time.Minute
- progressReportIntervalMu sync.RWMutex
-)
-
-func GetProgressReportInterval() time.Duration {
- progressReportIntervalMu.RLock()
- defer progressReportIntervalMu.RUnlock()
- return progressReportInterval
-}
-
-func SetProgressReportInterval(newTimeout time.Duration) {
- progressReportIntervalMu.Lock()
- defer progressReportIntervalMu.Unlock()
- progressReportInterval = newTimeout
-}
-
-const (
-	// We send ctrl responses inside the read loop. We do not want the
-	// send to block the read, but we still want the ctrl responses we send
-	// to be serialized. Thus we use a buffered chan to solve the problem.
-	// A small buffer should be OK for most cases, since we expect
-	// ctrl requests to be infrequent.
- ctrlStreamBufLen = 16
-)
-
-// serverWatchStream is an etcd server-side stream. It receives requests
-// from the client-side gRPC stream, receives watch events from mvcc.WatchStream,
-// and creates responses that are forwarded to the gRPC stream.
-// It also forwards control messages such as watcher created and canceled.
-type serverWatchStream struct {
- clusterID int64
- memberID int64
- raftTimer etcdserver.RaftTimer
-
- watchable mvcc.WatchableKV
-
- gRPCStream pb.Watch_WatchServer
- watchStream mvcc.WatchStream
- ctrlStream chan *pb.WatchResponse
-
- // mu protects progress, prevKV
- mu sync.Mutex
-	// progress tracks the watch IDs that the stream might need to send
-	// progress updates to.
- // TODO: combine progress and prevKV into a single struct?
- progress map[mvcc.WatchID]bool
- prevKV map[mvcc.WatchID]bool
-
- // closec indicates the stream is closed.
- closec chan struct{}
-
- // wg waits for the send loop to complete
- wg sync.WaitGroup
-
- ag AuthGetter
-}
-
-func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
- sws := serverWatchStream{
- clusterID: ws.clusterID,
- memberID: ws.memberID,
- raftTimer: ws.raftTimer,
-
- watchable: ws.watchable,
-
- gRPCStream: stream,
- watchStream: ws.watchable.NewWatchStream(),
- // chan for sending control response like watcher created and canceled.
- ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
- progress: make(map[mvcc.WatchID]bool),
- prevKV: make(map[mvcc.WatchID]bool),
- closec: make(chan struct{}),
-
- ag: ws.ag,
- }
-
- sws.wg.Add(1)
- go func() {
- sws.sendLoop()
- sws.wg.Done()
- }()
-
- errc := make(chan error, 1)
-	// Ideally recvLoop would also use sws.wg to signal its completion,
-	// but when stream.Context().Done() is closed, the stream's recv
-	// may continue to block since it uses a different context, leading to
-	// a deadlock when calling sws.close().
- go func() {
- if rerr := sws.recvLoop(); rerr != nil {
- if isClientCtxErr(stream.Context().Err(), rerr) {
- plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
- } else {
- plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
- }
- errc <- rerr
- }
- }()
- select {
- case err = <-errc:
- close(sws.ctrlStream)
- case <-stream.Context().Done():
- err = stream.Context().Err()
- // the only server-side cancellation is noleader for now.
- if err == context.Canceled {
- err = rpctypes.ErrGRPCNoLeader
- }
- }
- sws.close()
- return err
-}
-
-func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
- authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
- if err != nil {
- return false
- }
- if authInfo == nil {
- // if auth is enabled, IsRangePermitted() can cause an error
- authInfo = &auth.AuthInfo{}
- }
-
- return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
-}
-
-func (sws *serverWatchStream) recvLoop() error {
- for {
- req, err := sws.gRPCStream.Recv()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- return err
- }
-
- switch uv := req.RequestUnion.(type) {
- case *pb.WatchRequest_CreateRequest:
- if uv.CreateRequest == nil {
- break
- }
-
- creq := uv.CreateRequest
- if len(creq.Key) == 0 {
- // \x00 is the smallest key
- creq.Key = []byte{0}
- }
- if len(creq.RangeEnd) == 0 {
- // force nil since watchstream.Watch distinguishes
- // between nil and []byte{} for single key / >=
- creq.RangeEnd = nil
- }
- if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
- // support >= key queries
- creq.RangeEnd = []byte{}
- }
-
- if !sws.isWatchPermitted(creq) {
- wr := &pb.WatchResponse{
- Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: -1,
- Canceled: true,
- Created: true,
- CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
- }
-
- select {
- case sws.ctrlStream <- wr:
- case <-sws.closec:
- }
- return nil
- }
-
- filters := FiltersFromRequest(creq)
-
- wsrev := sws.watchStream.Rev()
- rev := creq.StartRevision
- if rev == 0 {
- rev = wsrev + 1
- }
- id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
- if id != -1 {
- sws.mu.Lock()
- if creq.ProgressNotify {
- sws.progress[id] = true
- }
- if creq.PrevKv {
- sws.prevKV[id] = true
- }
- sws.mu.Unlock()
- }
- wr := &pb.WatchResponse{
- Header: sws.newResponseHeader(wsrev),
- WatchId: int64(id),
- Created: true,
- Canceled: id == -1,
- }
- select {
- case sws.ctrlStream <- wr:
- case <-sws.closec:
- return nil
- }
- case *pb.WatchRequest_CancelRequest:
- if uv.CancelRequest != nil {
- id := uv.CancelRequest.WatchId
- err := sws.watchStream.Cancel(mvcc.WatchID(id))
- if err == nil {
- sws.ctrlStream <- &pb.WatchResponse{
- Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: id,
- Canceled: true,
- }
- sws.mu.Lock()
- delete(sws.progress, mvcc.WatchID(id))
- delete(sws.prevKV, mvcc.WatchID(id))
- sws.mu.Unlock()
- }
- }
- default:
-			// we probably should not shut down the entire stream when
-			// we receive an invalid command,
-			// so just do nothing instead.
- continue
- }
- }
-}
-
-func (sws *serverWatchStream) sendLoop() {
- // watch ids that are currently active
- ids := make(map[mvcc.WatchID]struct{})
- // watch responses pending on a watch id creation message
- pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
-
- interval := GetProgressReportInterval()
- progressTicker := time.NewTicker(interval)
-
- defer func() {
- progressTicker.Stop()
- // drain the chan to clean up pending events
- for ws := range sws.watchStream.Chan() {
- mvcc.ReportEventReceived(len(ws.Events))
- }
- for _, wrs := range pending {
- for _, ws := range wrs {
- mvcc.ReportEventReceived(len(ws.Events))
- }
- }
- }()
-
- for {
- select {
- case wresp, ok := <-sws.watchStream.Chan():
- if !ok {
- return
- }
-
- // TODO: evs is []mvccpb.Event type
- // either return []*mvccpb.Event from the mvcc package
- // or define protocol buffer with []mvccpb.Event.
- evs := wresp.Events
- events := make([]*mvccpb.Event, len(evs))
- sws.mu.Lock()
- needPrevKV := sws.prevKV[wresp.WatchID]
- sws.mu.Unlock()
- for i := range evs {
- events[i] = &evs[i]
-
- if needPrevKV {
- opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
- r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
- if err == nil && len(r.KVs) != 0 {
- events[i].PrevKv = &(r.KVs[0])
- }
- }
- }
-
- canceled := wresp.CompactRevision != 0
- wr := &pb.WatchResponse{
- Header: sws.newResponseHeader(wresp.Revision),
- WatchId: int64(wresp.WatchID),
- Events: events,
- CompactRevision: wresp.CompactRevision,
- Canceled: canceled,
- }
-
- if _, hasId := ids[wresp.WatchID]; !hasId {
- // buffer if id not yet announced
- wrs := append(pending[wresp.WatchID], wr)
- pending[wresp.WatchID] = wrs
- continue
- }
-
- mvcc.ReportEventReceived(len(evs))
- if err := sws.gRPCStream.Send(wr); err != nil {
- if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
- plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send watch response to gRPC stream (%q)", err.Error())
- }
- return
- }
-
- sws.mu.Lock()
- if len(evs) > 0 && sws.progress[wresp.WatchID] {
- // elide next progress update if sent a key update
- sws.progress[wresp.WatchID] = false
- }
- sws.mu.Unlock()
-
- case c, ok := <-sws.ctrlStream:
- if !ok {
- return
- }
-
- if err := sws.gRPCStream.Send(c); err != nil {
- if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
- plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
- }
- return
- }
-
- // track id creation
- wid := mvcc.WatchID(c.WatchId)
- if c.Canceled {
- delete(ids, wid)
- continue
- }
- if c.Created {
- // flush buffered events
- ids[wid] = struct{}{}
- for _, v := range pending[wid] {
- mvcc.ReportEventReceived(len(v.Events))
- if err := sws.gRPCStream.Send(v); err != nil {
- if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
- plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
- }
- return
- }
- }
- delete(pending, wid)
- }
- case <-progressTicker.C:
- sws.mu.Lock()
- for id, ok := range sws.progress {
- if ok {
- sws.watchStream.RequestProgress(id)
- }
- sws.progress[id] = true
- }
- sws.mu.Unlock()
- case <-sws.closec:
- return
- }
- }
-}
-
-func (sws *serverWatchStream) close() {
- sws.watchStream.Close()
- close(sws.closec)
- sws.wg.Wait()
-}
-
-func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
- return &pb.ResponseHeader{
- ClusterId: uint64(sws.clusterID),
- MemberId: uint64(sws.memberID),
- Revision: rev,
- RaftTerm: sws.raftTimer.Term(),
- }
-}
-
-func filterNoDelete(e mvccpb.Event) bool {
- return e.Type == mvccpb.DELETE
-}
-
-func filterNoPut(e mvccpb.Event) bool {
- return e.Type == mvccpb.PUT
-}
-
-func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
- filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
- for _, ft := range creq.Filters {
- switch ft {
- case pb.WatchCreateRequest_NOPUT:
- filters = append(filters, filterNoPut)
- case pb.WatchCreateRequest_NODELETE:
- filters = append(filters, filterNoDelete)
- default:
- }
- }
- return filters
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go
deleted file mode 100644
index 93e78e3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/apply.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "bytes"
- "context"
- "sort"
- "time"
-
- "github.com/coreos/etcd/auth"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const (
- warnApplyDuration = 100 * time.Millisecond
-)
-
-type applyResult struct {
- resp proto.Message
- err error
- // physc signals the physical effect of the request has completed in addition
- // to being logically reflected by the node. Currently only used for
- // Compaction requests.
- physc <-chan struct{}
-}
-
-// applierV3 is the interface for processing V3 raft messages
-type applierV3 interface {
- Apply(r *pb.InternalRaftRequest) *applyResult
-
- Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
- Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
- DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
- Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
- Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
-
- LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
- LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
-
- Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
-
- Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
-
- AuthEnable() (*pb.AuthEnableResponse, error)
- AuthDisable() (*pb.AuthDisableResponse, error)
-
- UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
- UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
- UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
- UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
- UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
- UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
- RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
- RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
- RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
- RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
- RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
- UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
- RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-}
-
-type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
-
-type applierV3backend struct {
- s *EtcdServer
-
- checkPut checkReqFunc
- checkRange checkReqFunc
-}
-
-func (s *EtcdServer) newApplierV3Backend() applierV3 {
- base := &applierV3backend{s: s}
- base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
- return base.checkRequestPut(rv, req)
- }
- base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
- return base.checkRequestRange(rv, req)
- }
- return base
-}
-
-func (s *EtcdServer) newApplierV3() applierV3 {
- return newAuthApplierV3(
- s.AuthStore(),
- newQuotaApplierV3(s, s.newApplierV3Backend()),
- s.lessor,
- )
-}
-
-func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
- ar := &applyResult{}
- defer func(start time.Time) {
- warnOfExpensiveRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
- }(time.Now())
-
- // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
- switch {
- case r.Range != nil:
- ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
- case r.Put != nil:
- ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
- case r.DeleteRange != nil:
- ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
- case r.Txn != nil:
- ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
- case r.Compaction != nil:
- ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
- case r.LeaseGrant != nil:
- ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
- case r.LeaseRevoke != nil:
- ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
- case r.Alarm != nil:
- ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
- case r.Authenticate != nil:
- ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
- case r.AuthEnable != nil:
- ar.resp, ar.err = a.s.applyV3.AuthEnable()
- case r.AuthDisable != nil:
- ar.resp, ar.err = a.s.applyV3.AuthDisable()
- case r.AuthUserAdd != nil:
- ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
- case r.AuthUserDelete != nil:
- ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
- case r.AuthUserChangePassword != nil:
- ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
- case r.AuthUserGrantRole != nil:
- ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
- case r.AuthUserGet != nil:
- ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
- case r.AuthUserRevokeRole != nil:
- ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
- case r.AuthRoleAdd != nil:
- ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
- case r.AuthRoleGrantPermission != nil:
- ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
- case r.AuthRoleGet != nil:
- ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
- case r.AuthRoleRevokePermission != nil:
- ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
- case r.AuthRoleDelete != nil:
- ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
- case r.AuthUserList != nil:
- ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
- case r.AuthRoleList != nil:
- ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
- default:
- panic("not implemented")
- }
- return ar
-}
-
-func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
- resp = &pb.PutResponse{}
- resp.Header = &pb.ResponseHeader{}
-
- val, leaseID := p.Value, lease.LeaseID(p.Lease)
- if txn == nil {
- if leaseID != lease.NoLease {
- if l := a.s.lessor.Lookup(leaseID); l == nil {
- return nil, lease.ErrLeaseNotFound
- }
- }
- txn = a.s.KV().Write()
- defer txn.End()
- }
-
- var rr *mvcc.RangeResult
- if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
- rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
- if err != nil {
- return nil, err
- }
- }
- if p.IgnoreValue || p.IgnoreLease {
- if rr == nil || len(rr.KVs) == 0 {
- // ignore_{lease,value} flag expects previous key-value pair
- return nil, ErrKeyNotFound
- }
- }
- if p.IgnoreValue {
- val = rr.KVs[0].Value
- }
- if p.IgnoreLease {
- leaseID = lease.LeaseID(rr.KVs[0].Lease)
- }
- if p.PrevKv {
- if rr != nil && len(rr.KVs) != 0 {
- resp.PrevKv = &rr.KVs[0]
- }
- }
-
- resp.Header.Revision = txn.Put(p.Key, val, leaseID)
- return resp, nil
-}
-
-func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- resp := &pb.DeleteRangeResponse{}
- resp.Header = &pb.ResponseHeader{}
- end := mkGteRange(dr.RangeEnd)
-
- if txn == nil {
- txn = a.s.kv.Write()
- defer txn.End()
- }
-
- if dr.PrevKv {
- rr, err := txn.Range(dr.Key, end, mvcc.RangeOptions{})
- if err != nil {
- return nil, err
- }
- if rr != nil {
- resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
- for i := range rr.KVs {
- resp.PrevKvs[i] = &rr.KVs[i]
- }
- }
- }
-
- resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end)
- return resp, nil
-}
-
-func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- resp := &pb.RangeResponse{}
- resp.Header = &pb.ResponseHeader{}
-
- if txn == nil {
- txn = a.s.kv.Read()
- defer txn.End()
- }
-
- limit := r.Limit
- if r.SortOrder != pb.RangeRequest_NONE ||
- r.MinModRevision != 0 || r.MaxModRevision != 0 ||
- r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
- // fetch everything; sort and truncate afterwards
- limit = 0
- }
- if limit > 0 {
- // fetch one extra for 'more' flag
- limit = limit + 1
- }
-
- ro := mvcc.RangeOptions{
- Limit: limit,
- Rev: r.Revision,
- Count: r.CountOnly,
- }
-
- rr, err := txn.Range(r.Key, mkGteRange(r.RangeEnd), ro)
- if err != nil {
- return nil, err
- }
-
- if r.MaxModRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
- pruneKVs(rr, f)
- }
- if r.MinModRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
- pruneKVs(rr, f)
- }
- if r.MaxCreateRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
- pruneKVs(rr, f)
- }
- if r.MinCreateRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
- pruneKVs(rr, f)
- }
-
- sortOrder := r.SortOrder
- if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
- // Since current mvcc.Range implementation returns results
-		// sorted by keys in lexicographically ascending order,
- // sort ASCEND by default only when target is not 'KEY'
- sortOrder = pb.RangeRequest_ASCEND
- }
- if sortOrder != pb.RangeRequest_NONE {
- var sorter sort.Interface
- switch {
- case r.SortTarget == pb.RangeRequest_KEY:
- sorter = &kvSortByKey{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_VERSION:
- sorter = &kvSortByVersion{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_CREATE:
- sorter = &kvSortByCreate{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_MOD:
- sorter = &kvSortByMod{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_VALUE:
- sorter = &kvSortByValue{&kvSort{rr.KVs}}
- }
- switch {
- case sortOrder == pb.RangeRequest_ASCEND:
- sort.Sort(sorter)
- case sortOrder == pb.RangeRequest_DESCEND:
- sort.Sort(sort.Reverse(sorter))
- }
- }
-
- if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
- rr.KVs = rr.KVs[:r.Limit]
- resp.More = true
- }
-
- resp.Header.Revision = rr.Rev
- resp.Count = int64(rr.Count)
- resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
- for i := range rr.KVs {
- if r.KeysOnly {
- rr.KVs[i].Value = nil
- }
- resp.Kvs[i] = &rr.KVs[i]
- }
- return resp, nil
-}
-
-func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- isWrite := !isTxnReadonly(rt)
- txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
-
- txnPath := compareToPath(txn, rt)
- if isWrite {
- if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil {
- txn.End()
- return nil, err
- }
- }
- if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil {
- txn.End()
- return nil, err
- }
-
- txnResp, _ := newTxnResp(rt, txnPath)
-
- // When executing mutable txn ops, etcd must hold the txn lock so
- // readers do not see any intermediate results. Since writes are
- // serialized on the raft loop, the revision in the read view will
- // be the revision of the write txn.
- if isWrite {
- txn.End()
- txn = a.s.KV().Write()
- }
- a.applyTxn(txn, rt, txnPath, txnResp)
- rev := txn.Rev()
- if len(txn.Changes()) != 0 {
- rev++
- }
- txn.End()
-
- txnResp.Header.Revision = rev
- return txnResp, nil
-}
-
-// newTxnResp allocates a txn response for a txn request given a path.
-func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- resps := make([]*pb.ResponseOp, len(reqs))
- txnResp = &pb.TxnResponse{
- Responses: resps,
- Succeeded: txnPath[0],
- Header: &pb.ResponseHeader{},
- }
- for i, req := range reqs {
- switch tv := req.Request.(type) {
- case *pb.RequestOp_RequestRange:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
- case *pb.RequestOp_RequestPut:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
- case *pb.RequestOp_RequestDeleteRange:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
- case *pb.RequestOp_RequestTxn:
- resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
- txnPath = txnPath[1+txns:]
- txnCount += txns + 1
- default:
- }
- }
- return txnResp, txnCount
-}
-
-func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
- txnPath := make([]bool, 1)
- ops := rt.Success
- if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
- ops = rt.Failure
- }
- for _, op := range ops {
- tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
- if !ok || tv.RequestTxn == nil {
- continue
- }
- txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
- }
- return txnPath
-}
-
-func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
- for _, c := range cmps {
- if !applyCompare(rv, c) {
- return false
- }
- }
- return true
-}
-
-// applyCompare applies the compare request.
-// If the comparison succeeds, it returns true. Otherwise, returns false.
-func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
- // TODO: possible optimizations
- // * chunk reads for large ranges to conserve memory
- // * rewrite rules for common patterns:
- // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
- // * caching
- rr, err := rv.Range(c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
- if err != nil {
- return false
- }
- if len(rr.KVs) == 0 {
- if c.Target == pb.Compare_VALUE {
- // Always fail if comparing a value on a key/keys that doesn't exist;
- // nil == empty string in grpc; no way to represent missing value
- return false
- }
- return compareKV(c, mvccpb.KeyValue{})
- }
- for _, kv := range rr.KVs {
- if !compareKV(c, kv) {
- return false
- }
- }
- return true
-}
-
-func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
- var result int
- rev := int64(0)
- switch c.Target {
- case pb.Compare_VALUE:
- v := []byte{}
- if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
- v = tv.Value
- }
- result = bytes.Compare(ckv.Value, v)
- case pb.Compare_CREATE:
- if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
- rev = tv.CreateRevision
- }
- result = compareInt64(ckv.CreateRevision, rev)
- case pb.Compare_MOD:
- if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
- rev = tv.ModRevision
- }
- result = compareInt64(ckv.ModRevision, rev)
- case pb.Compare_VERSION:
- if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
- rev = tv.Version
- }
- result = compareInt64(ckv.Version, rev)
- case pb.Compare_LEASE:
- if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
- rev = tv.Lease
- }
- result = compareInt64(ckv.Lease, rev)
- }
- switch c.Result {
- case pb.Compare_EQUAL:
- return result == 0
- case pb.Compare_NOT_EQUAL:
- return result != 0
- case pb.Compare_GREATER:
- return result > 0
- case pb.Compare_LESS:
- return result < 0
- }
- return true
-}
-
-func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) {
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- for i, req := range reqs {
- respi := tresp.Responses[i].Response
- switch tv := req.Request.(type) {
- case *pb.RequestOp_RequestRange:
- resp, err := a.Range(txn, tv.RequestRange)
- if err != nil {
- plog.Panicf("unexpected error during txn: %v", err)
- }
- respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
- case *pb.RequestOp_RequestPut:
- resp, err := a.Put(txn, tv.RequestPut)
- if err != nil {
- plog.Panicf("unexpected error during txn: %v", err)
- }
- respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
- case *pb.RequestOp_RequestDeleteRange:
- resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
- if err != nil {
- plog.Panicf("unexpected error during txn: %v", err)
- }
- respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
- case *pb.RequestOp_RequestTxn:
- resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
- applyTxns := a.applyTxn(txn, tv.RequestTxn, txnPath[1:], resp)
- txns += applyTxns + 1
- txnPath = txnPath[applyTxns+1:]
- default:
- // empty union
- }
- }
- return txns
-}
-
-func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
- resp := &pb.CompactionResponse{}
- resp.Header = &pb.ResponseHeader{}
- ch, err := a.s.KV().Compact(compaction.Revision)
- if err != nil {
- return nil, ch, err
- }
- // get the current revision. which key to get is not important.
- rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
- resp.Header.Revision = rr.Rev
- return resp, ch, err
-}
-
-func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
- resp := &pb.LeaseGrantResponse{}
- if err == nil {
- resp.ID = int64(l.ID)
- resp.TTL = l.TTL()
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
- return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
-}
-
-func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- resp := &pb.AlarmResponse{}
- oldCount := len(a.s.alarmStore.Get(ar.Alarm))
-
- switch ar.Action {
- case pb.AlarmRequest_GET:
- resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
- case pb.AlarmRequest_ACTIVATE:
- m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
- if m == nil {
- break
- }
- resp.Alarms = append(resp.Alarms, m)
- activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
- if !activated {
- break
- }
-
- plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
- switch m.Alarm {
- case pb.AlarmType_CORRUPT:
- a.s.applyV3 = newApplierV3Corrupt(a)
- case pb.AlarmType_NOSPACE:
- a.s.applyV3 = newApplierV3Capped(a)
- default:
- plog.Errorf("unimplemented alarm activation (%+v)", m)
- }
- case pb.AlarmRequest_DEACTIVATE:
- m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
- if m == nil {
- break
- }
- resp.Alarms = append(resp.Alarms, m)
- deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
- if !deactivated {
- break
- }
-
- switch m.Alarm {
- case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
- // TODO: check kv hash before deactivating CORRUPT?
- plog.Infof("alarm disarmed %+v", ar)
- a.s.applyV3 = a.s.newApplierV3()
- default:
- plog.Errorf("unimplemented alarm deactivation (%+v)", m)
- }
- default:
- return nil, nil
- }
- return resp, nil
-}
-
-type applierV3Capped struct {
- applierV3
- q backendQuota
-}
-
-// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
-// with Puts so that the number of keys in the store is capped.
-func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
-
-func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
- return nil, ErrNoSpace
-}
-
-func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if a.q.Cost(r) > 0 {
- return nil, ErrNoSpace
- }
- return a.applierV3.Txn(r)
-}
-
-func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return nil, ErrNoSpace
-}
-
-func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
- err := a.s.AuthStore().AuthEnable()
- if err != nil {
- return nil, err
- }
- return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
- a.s.AuthStore().AuthDisable()
- return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
- ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
- resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := a.s.AuthStore().UserAdd(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := a.s.AuthStore().UserDelete(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := a.s.AuthStore().UserChangePassword(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := a.s.AuthStore().UserGrantRole(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := a.s.AuthStore().UserGet(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := a.s.AuthStore().UserRevokeRole(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := a.s.AuthStore().RoleAdd(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := a.s.AuthStore().RoleGrantPermission(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := a.s.AuthStore().RoleGet(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := a.s.AuthStore().RoleRevokePermission(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := a.s.AuthStore().RoleDelete(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := a.s.AuthStore().UserList(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := a.s.AuthStore().RoleList(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-type quotaApplierV3 struct {
- applierV3
- q Quota
-}
-
-func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
-	return &quotaApplierV3{app, NewBackendQuota(s)}
-}
-
-func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
- ok := a.q.Available(p)
- resp, err := a.applierV3.Put(txn, p)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- ok := a.q.Available(rt)
- resp, err := a.applierV3.Txn(rt)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- ok := a.q.Available(lc)
- resp, err := a.applierV3.LeaseGrant(lc)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-type kvSort struct{ kvs []mvccpb.KeyValue }
-
-func (s *kvSort) Swap(i, j int) {
- t := s.kvs[i]
- s.kvs[i] = s.kvs[j]
- s.kvs[j] = t
-}
-func (s *kvSort) Len() int { return len(s.kvs) }
-
-type kvSortByKey struct{ *kvSort }
-
-func (s *kvSortByKey) Less(i, j int) bool {
- return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
-}
-
-type kvSortByVersion struct{ *kvSort }
-
-func (s *kvSortByVersion) Less(i, j int) bool {
- return (s.kvs[i].Version - s.kvs[j].Version) < 0
-}
-
-type kvSortByCreate struct{ *kvSort }
-
-func (s *kvSortByCreate) Less(i, j int) bool {
- return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
-}
-
-type kvSortByMod struct{ *kvSort }
-
-func (s *kvSortByMod) Less(i, j int) bool {
- return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
-}
-
-type kvSortByValue struct{ *kvSort }
-
-func (s *kvSortByValue) Less(i, j int) bool {
- return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
-}
-
-func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) {
- txnCount := 0
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- for _, req := range reqs {
- if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil {
- txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f)
- if err != nil {
- return 0, err
- }
- txnCount += txns + 1
- txnPath = txnPath[txns+1:]
- continue
- }
- if err := f(rv, req); err != nil {
- return 0, err
- }
- }
- return txnCount, nil
-}
-
-func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
- tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut)
- if !ok || tv.RequestPut == nil {
- return nil
- }
- req := tv.RequestPut
- if req.IgnoreValue || req.IgnoreLease {
- // expects previous key-value, error if not exist
- rr, err := rv.Range(req.Key, nil, mvcc.RangeOptions{})
- if err != nil {
- return err
- }
- if rr == nil || len(rr.KVs) == 0 {
- return ErrKeyNotFound
- }
- }
- if lease.LeaseID(req.Lease) != lease.NoLease {
- if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
- return lease.ErrLeaseNotFound
- }
- }
- return nil
-}
-
-func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
- tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange)
- if !ok || tv.RequestRange == nil {
- return nil
- }
- req := tv.RequestRange
- switch {
- case req.Revision == 0:
- return nil
- case req.Revision > rv.Rev():
- return mvcc.ErrFutureRev
- case req.Revision < rv.FirstRev():
- return mvcc.ErrCompacted
- }
- return nil
-}
-
-func compareInt64(a, b int64) int {
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
-}
-
-// mkGteRange determines if the range end is a >= range. This works around grpc
-// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
-// If it is a GTE range, then []byte{} is returned to indicate the empty byte
-// string (vs nil being no byte string).
-func mkGteRange(rangeEnd []byte) []byte {
- if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
- return []byte{}
- }
- return rangeEnd
-}
-
-func noSideEffect(r *pb.InternalRaftRequest) bool {
- return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
-}
-
-func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
- f := func(ops []*pb.RequestOp) []*pb.RequestOp {
- j := 0
- for i := 0; i < len(ops); i++ {
- if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
- continue
- }
- ops[j] = ops[i]
- j++
- }
-
- return ops[:j]
- }
-
- txn.Success = f(txn.Success)
- txn.Failure = f(txn.Failure)
-}
-
-func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
- j := 0
- for i := range rr.KVs {
- rr.KVs[j] = rr.KVs[i]
- if !isPrunable(&rr.KVs[i]) {
- j++
- }
- }
- rr.KVs = rr.KVs[:j]
-}
-
-func newHeader(s *EtcdServer) *pb.ResponseHeader {
- return &pb.ResponseHeader{
- ClusterId: uint64(s.Cluster().ID()),
- MemberId: uint64(s.ID()),
- Revision: s.KV().Rev(),
- RaftTerm: s.Term(),
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
deleted file mode 100644
index ec93914..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "sync"
-
- "github.com/coreos/etcd/auth"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
-)
-
-type authApplierV3 struct {
- applierV3
- as auth.AuthStore
- lessor lease.Lessor
-
- // mu serializes Apply so that user isn't corrupted and so that
- // serialized requests don't leak data from TOCTOU errors
- mu sync.Mutex
-
- authInfo auth.AuthInfo
-}
-
-func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
- return &authApplierV3{applierV3: base, as: as, lessor: lessor}
-}
-
-func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
- aa.mu.Lock()
- defer aa.mu.Unlock()
- if r.Header != nil {
- // backward-compatible with pre-3.0 releases when internalRaftRequest
- // does not have header field
- aa.authInfo.Username = r.Header.Username
- aa.authInfo.Revision = r.Header.AuthRevision
- }
- if needAdminPermission(r) {
- if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &applyResult{err: err}
- }
- }
- ret := aa.applierV3.Apply(r)
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return ret
-}
-
-func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
- if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
- return nil, err
- }
-
- if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
- // The specified lease is already attached with a key that cannot
- // be written by this user. It means the user cannot revoke the
- // lease so attaching the lease to the newly written key should
- // be forbidden.
- return nil, err
- }
-
- if r.PrevKv {
- err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
- if err != nil {
- return nil, err
- }
- }
- return aa.applierV3.Put(txn, r)
-}
-
-func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
- return nil, err
- }
- return aa.applierV3.Range(txn, r)
-}
-
-func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
- return nil, err
- }
- if r.PrevKv {
- err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
- if err != nil {
- return nil, err
- }
- }
-
- return aa.applierV3.DeleteRange(txn, r)
-}
-
-func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
- for _, requ := range reqs {
- switch tv := requ.Request.(type) {
- case *pb.RequestOp_RequestRange:
- if tv.RequestRange == nil {
- continue
- }
-
- if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
- return err
- }
-
- case *pb.RequestOp_RequestPut:
- if tv.RequestPut == nil {
- continue
- }
-
- if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
- return err
- }
-
- case *pb.RequestOp_RequestDeleteRange:
- if tv.RequestDeleteRange == nil {
- continue
- }
-
- if tv.RequestDeleteRange.PrevKv {
- err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
- if err != nil {
- return err
- }
- }
-
- err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
- for _, c := range rt.Compare {
- if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
- return err
- }
- }
- if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
- return err
- }
- if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
- return err
- }
- return nil
-}
-
-func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
- return nil, err
- }
- return aa.applierV3.Txn(rt)
-}
-
-func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
- return nil, err
- }
- return aa.applierV3.LeaseRevoke(lc)
-}
-
-func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
- lease := aa.lessor.Lookup(leaseID)
- if lease != nil {
- for _, key := range lease.Keys() {
- if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- err := aa.as.IsAdminPermitted(&aa.authInfo)
- if err != nil && r.Name != aa.authInfo.Username {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &pb.AuthUserGetResponse{}, err
- }
-
- return aa.applierV3.UserGet(r)
-}
-
-func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- err := aa.as.IsAdminPermitted(&aa.authInfo)
- if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &pb.AuthRoleGetResponse{}, err
- }
-
- return aa.applierV3.RoleGet(r)
-}
-
-func needAdminPermission(r *pb.InternalRaftRequest) bool {
- switch {
- case r.AuthEnable != nil:
- return true
- case r.AuthDisable != nil:
- return true
- case r.AuthUserAdd != nil:
- return true
- case r.AuthUserDelete != nil:
- return true
- case r.AuthUserChangePassword != nil:
- return true
- case r.AuthUserGrantRole != nil:
- return true
- case r.AuthUserRevokeRole != nil:
- return true
- case r.AuthRoleAdd != nil:
- return true
- case r.AuthRoleGrantPermission != nil:
- return true
- case r.AuthRoleRevokePermission != nil:
- return true
- case r.AuthRoleDelete != nil:
- return true
- case r.AuthUserList != nil:
- return true
- case r.AuthRoleList != nil:
- return true
- default:
- return false
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
deleted file mode 100644
index a49b682..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "encoding/json"
- "path"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/store"
- "github.com/coreos/go-semver/semver"
-)
-
-// ApplierV2 is the interface for processing V2 raft messages
-type ApplierV2 interface {
- Delete(r *RequestV2) Response
- Post(r *RequestV2) Response
- Put(r *RequestV2) Response
- QGet(r *RequestV2) Response
- Sync(r *RequestV2) Response
-}
-
-func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
- return &applierV2store{store: s, cluster: c}
-}
-
-type applierV2store struct {
- store store.Store
- cluster *membership.RaftCluster
-}
-
-func (a *applierV2store) Delete(r *RequestV2) Response {
- switch {
- case r.PrevIndex > 0 || r.PrevValue != "":
- return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
- default:
- return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
- }
-}
-
-func (a *applierV2store) Post(r *RequestV2) Response {
- return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions()))
-}
-
-func (a *applierV2store) Put(r *RequestV2) Response {
- ttlOptions := r.TTLOptions()
- exists, existsSet := pbutil.GetBool(r.PrevExist)
- switch {
- case existsSet:
- if exists {
- if r.PrevIndex == 0 && r.PrevValue == "" {
- return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
- }
- return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
- }
- return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
- case r.PrevIndex > 0 || r.PrevValue != "":
- return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
- default:
- if storeMemberAttributeRegexp.MatchString(r.Path) {
- id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
- var attr membership.Attributes
- if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
- plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
- }
- if a.cluster != nil {
- a.cluster.UpdateAttributes(id, attr)
- }
- // return an empty response since there is no consumer.
- return Response{}
- }
- if r.Path == membership.StoreClusterVersionKey() {
- if a.cluster != nil {
- a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
- }
- // return an empty response since there is no consumer.
- return Response{}
- }
- return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
- }
-}
-
-func (a *applierV2store) QGet(r *RequestV2) Response {
- return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
-}
-
-func (a *applierV2store) Sync(r *RequestV2) Response {
- a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
- return Response{}
-}
-
-// applyV2Request interprets r as a call to store.X and returns a Response interpreted
-// from store.Event
-func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
- defer warnOfExpensiveRequest(time.Now(), r, nil, nil)
-
- switch r.Method {
- case "POST":
- return s.applyV2.Post(r)
- case "PUT":
- return s.applyV2.Put(r)
- case "DELETE":
- return s.applyV2.Delete(r)
- case "QGET":
- return s.applyV2.QGet(r)
- case "SYNC":
- return s.applyV2.Sync(r)
- default:
- // This should never be reached, but just in case:
- return Response{Err: ErrUnknownMethod}
- }
-}
-
-func (r *RequestV2) TTLOptions() store.TTLOptionSet {
- refresh, _ := pbutil.GetBool(r.Refresh)
- ttlOptions := store.TTLOptionSet{Refresh: refresh}
- if r.Expiration != 0 {
- ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
- }
- return ttlOptions
-}
-
-func toResponse(ev *store.Event, err error) Response {
- return Response{Event: ev, Err: err}
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
deleted file mode 100644
index 8991675..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package auth implements etcd authentication.
-package auth
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "path"
- "reflect"
- "sort"
- "strings"
- "time"
-
- etcderr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-
- "golang.org/x/crypto/bcrypt"
-)
-
-const (
- // StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
- StorePermsPrefix = "/2"
-
- // RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
- RootRoleName = "root"
-
- // GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
- GuestRoleName = "guest"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
-)
-
-var rootRole = Role{
- Role: RootRoleName,
- Permissions: Permissions{
- KV: RWPermission{
- Read: []string{"/*"},
- Write: []string{"/*"},
- },
- },
-}
-
-var guestRole = Role{
- Role: GuestRoleName,
- Permissions: Permissions{
- KV: RWPermission{
- Read: []string{"/*"},
- Write: []string{"/*"},
- },
- },
-}
-
-type doer interface {
- Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
-}
-
-type Store interface {
- AllUsers() ([]string, error)
- GetUser(name string) (User, error)
- CreateOrUpdateUser(user User) (out User, created bool, err error)
- CreateUser(user User) (User, error)
- DeleteUser(name string) error
- UpdateUser(user User) (User, error)
- AllRoles() ([]string, error)
- GetRole(name string) (Role, error)
- CreateRole(role Role) error
- DeleteRole(name string) error
- UpdateRole(role Role) (Role, error)
- AuthEnabled() bool
- EnableAuth() error
- DisableAuth() error
- PasswordStore
-}
-
-type PasswordStore interface {
- CheckPassword(user User, password string) bool
- HashPassword(password string) (string, error)
-}
-
-type store struct {
- server doer
- timeout time.Duration
- ensuredOnce bool
-
- PasswordStore
-}
-
-type User struct {
- User string `json:"user"`
- Password string `json:"password,omitempty"`
- Roles []string `json:"roles"`
- Grant []string `json:"grant,omitempty"`
- Revoke []string `json:"revoke,omitempty"`
-}
-
-type Role struct {
- Role string `json:"role"`
- Permissions Permissions `json:"permissions"`
- Grant *Permissions `json:"grant,omitempty"`
- Revoke *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
- KV RWPermission `json:"kv"`
-}
-
-func (p *Permissions) IsEmpty() bool {
- return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
-}
-
-type RWPermission struct {
- Read []string `json:"read"`
- Write []string `json:"write"`
-}
-
-type Error struct {
- Status int
- Errmsg string
-}
-
-func (ae Error) Error() string { return ae.Errmsg }
-func (ae Error) HTTPStatus() int { return ae.Status }
-
-func authErr(hs int, s string, v ...interface{}) Error {
- return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
-}
-
-func NewStore(server doer, timeout time.Duration) Store {
- s := &store{
- server: server,
- timeout: timeout,
- PasswordStore: passwordStore{},
- }
- return s
-}
-
-// passwordStore implements PasswordStore using bcrypt to hash user passwords
-type passwordStore struct{}
-
-func (_ passwordStore) CheckPassword(user User, password string) bool {
- err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
- return err == nil
-}
-
-func (_ passwordStore) HashPassword(password string) (string, error) {
- hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
- return string(hash), err
-}
-
-func (s *store) AllUsers() ([]string, error) {
- resp, err := s.requestResource("/users/", false, false)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return []string{}, nil
- }
- }
- return nil, err
- }
- var nodes []string
- for _, n := range resp.Event.Node.Nodes {
- _, user := path.Split(n.Key)
- nodes = append(nodes, user)
- }
- sort.Strings(nodes)
- return nodes, nil
-}
-
-func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }
-
-// CreateOrUpdateUser should only be used for creating a new user or when you are not
-// sure whether it is a create or an update. (When only a password is passed in, we are
-// not sure whether it is an update or a create.)
-func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
- _, err = s.getUser(user.User, true)
- if err == nil {
- out, err = s.UpdateUser(user)
- return out, false, err
- }
- u, err := s.CreateUser(user)
- return u, true, err
-}
-
-func (s *store) CreateUser(user User) (User, error) {
- // Attach root role to root user.
- if user.User == "root" {
- user = attachRootRole(user)
- }
- u, err := s.createUserInternal(user)
- if err == nil {
- plog.Noticef("created user %s", user.User)
- }
- return u, err
-}
-
-func (s *store) createUserInternal(user User) (User, error) {
- if user.Password == "" {
- return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
- }
- hash, err := s.HashPassword(user.Password)
- if err != nil {
- return user, err
- }
- user.Password = hash
-
- _, err = s.createResource("/users/"+user.User, user)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- return user, authErr(http.StatusConflict, "User %s already exists.", user.User)
- }
- }
- }
- return user, err
-}
-
-func (s *store) DeleteUser(name string) error {
- if s.AuthEnabled() && name == "root" {
- return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.")
- }
- _, err := s.deleteResource("/users/" + name)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return authErr(http.StatusNotFound, "User %s does not exist", name)
- }
- }
- return err
- }
- plog.Noticef("deleted user %s", name)
- return nil
-}
-
-func (s *store) UpdateUser(user User) (User, error) {
- old, err := s.getUser(user.User, true)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User)
- }
- }
- return old, err
- }
-
- newUser, err := old.merge(user, s.PasswordStore)
- if err != nil {
- return old, err
- }
- if reflect.DeepEqual(old, newUser) {
- return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.")
- }
- _, err = s.updateResource("/users/"+user.User, newUser)
- if err == nil {
- plog.Noticef("updated user %s", user.User)
- }
- return newUser, err
-}
-
-func (s *store) AllRoles() ([]string, error) {
- nodes := []string{RootRoleName}
- resp, err := s.requestResource("/roles/", false, false)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return nodes, nil
- }
- }
- return nil, err
- }
- for _, n := range resp.Event.Node.Nodes {
- _, role := path.Split(n.Key)
- nodes = append(nodes, role)
- }
- sort.Strings(nodes)
- return nodes, nil
-}
-
-func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }
-
-func (s *store) CreateRole(role Role) error {
- if role.Role == RootRoleName {
- return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
- }
- _, err := s.createResource("/roles/"+role.Role, role)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- return authErr(http.StatusConflict, "Role %s already exists.", role.Role)
- }
- }
- }
- if err == nil {
- plog.Noticef("created new role %s", role.Role)
- }
- return err
-}
-
-func (s *store) DeleteRole(name string) error {
- if name == RootRoleName {
- return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name)
- }
- _, err := s.deleteResource("/roles/" + name)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return authErr(http.StatusNotFound, "Role %s doesn't exist.", name)
- }
- }
- }
- if err == nil {
- plog.Noticef("deleted role %s", name)
- }
- return err
-}
-
-func (s *store) UpdateRole(role Role) (Role, error) {
- if role.Role == RootRoleName {
- return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
- }
- old, err := s.getRole(role.Role, true)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role)
- }
- }
- return old, err
- }
- newRole, err := old.merge(role)
- if err != nil {
- return old, err
- }
- if reflect.DeepEqual(old, newRole) {
- return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.")
- }
- _, err = s.updateResource("/roles/"+role.Role, newRole)
- if err == nil {
- plog.Noticef("updated role %s", role.Role)
- }
- return newRole, err
-}
-
-func (s *store) AuthEnabled() bool {
- return s.detectAuth()
-}
-
-func (s *store) EnableAuth() error {
- if s.AuthEnabled() {
- return authErr(http.StatusConflict, "already enabled")
- }
-
- if _, err := s.getUser("root", true); err != nil {
- return authErr(http.StatusConflict, "No root user available, please create one")
- }
- if _, err := s.getRole(GuestRoleName, true); err != nil {
- plog.Printf("no guest role access found, creating default")
- if err := s.CreateRole(guestRole); err != nil {
- plog.Errorf("error creating guest role. aborting auth enable.")
- return err
- }
- }
-
- if err := s.enableAuth(); err != nil {
- plog.Errorf("error enabling auth (%v)", err)
- return err
- }
-
- plog.Noticef("auth: enabled auth")
- return nil
-}
-
-func (s *store) DisableAuth() error {
- if !s.AuthEnabled() {
- return authErr(http.StatusConflict, "already disabled")
- }
-
- err := s.disableAuth()
- if err == nil {
- plog.Noticef("auth: disabled auth")
- } else {
- plog.Errorf("error disabling auth (%v)", err)
- }
- return err
-}
-
-// merge applies the properties of the passed-in User to the User on which it
-// is called and returns a new User with these modifications applied. Think of
-// all Users as immutable sets of data. Merge allows you to perform the set
-// operations (desired grants and revokes) atomically
-func (ou User) merge(nu User, s PasswordStore) (User, error) {
- var out User
- if ou.User != nu.User {
- return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
- }
- out.User = ou.User
- if nu.Password != "" {
- hash, err := s.HashPassword(nu.Password)
- if err != nil {
- return ou, err
- }
- out.Password = hash
- } else {
- out.Password = ou.Password
- }
- currentRoles := types.NewUnsafeSet(ou.Roles...)
- for _, g := range nu.Grant {
- if currentRoles.Contains(g) {
- plog.Noticef("granting duplicate role %s for user %s", g, nu.User)
- return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
- }
- currentRoles.Add(g)
- }
- for _, r := range nu.Revoke {
- if !currentRoles.Contains(r) {
- plog.Noticef("revoking ungranted role %s for user %s", r, nu.User)
- return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
- }
- currentRoles.Remove(r)
- }
- out.Roles = currentRoles.Values()
- sort.Strings(out.Roles)
- return out, nil
-}
-
-// merge for a role works the same as User above -- atomic Role application to
-// each of the substructures.
-func (r Role) merge(n Role) (Role, error) {
- var out Role
- var err error
- if r.Role != n.Role {
- return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
- }
- out.Role = r.Role
- out.Permissions, err = r.Permissions.Grant(n.Grant)
- if err != nil {
- return out, err
- }
- out.Permissions, err = out.Permissions.Revoke(n.Revoke)
- return out, err
-}
-
-func (r Role) HasKeyAccess(key string, write bool) bool {
- if r.Role == RootRoleName {
- return true
- }
- return r.Permissions.KV.HasAccess(key, write)
-}
-
-func (r Role) HasRecursiveAccess(key string, write bool) bool {
- if r.Role == RootRoleName {
- return true
- }
- return r.Permissions.KV.HasRecursiveAccess(key, write)
-}
-
-// Grant adds a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (p Permissions) Grant(n *Permissions) (Permissions, error) {
- var out Permissions
- var err error
- if n == nil {
- return p, nil
- }
- out.KV, err = p.KV.Grant(n.KV)
- return out, err
-}
-
-// Revoke removes a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (p Permissions) Revoke(n *Permissions) (Permissions, error) {
- var out Permissions
- var err error
- if n == nil {
- return p, nil
- }
- out.KV, err = p.KV.Revoke(n.KV)
- return out, err
-}
-
-// Grant adds a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
- var out RWPermission
- currentRead := types.NewUnsafeSet(rw.Read...)
- for _, r := range n.Read {
- if currentRead.Contains(r) {
- return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
- }
- currentRead.Add(r)
- }
- currentWrite := types.NewUnsafeSet(rw.Write...)
- for _, w := range n.Write {
- if currentWrite.Contains(w) {
- return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
- }
- currentWrite.Add(w)
- }
- out.Read = currentRead.Values()
- out.Write = currentWrite.Values()
- sort.Strings(out.Read)
- sort.Strings(out.Write)
- return out, nil
-}
-
-// Revoke removes a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (rw RWPermission) Revoke(n RWPermission) (RWPermission, error) {
- var out RWPermission
- currentRead := types.NewUnsafeSet(rw.Read...)
- for _, r := range n.Read {
- if !currentRead.Contains(r) {
- plog.Noticef("revoking ungranted read permission %s", r)
- continue
- }
- currentRead.Remove(r)
- }
- currentWrite := types.NewUnsafeSet(rw.Write...)
- for _, w := range n.Write {
- if !currentWrite.Contains(w) {
- plog.Noticef("revoking ungranted write permission %s", w)
- continue
- }
- currentWrite.Remove(w)
- }
- out.Read = currentRead.Values()
- out.Write = currentWrite.Values()
- sort.Strings(out.Read)
- sort.Strings(out.Write)
- return out, nil
-}
-
-func (rw RWPermission) HasAccess(key string, write bool) bool {
- var list []string
- if write {
- list = rw.Write
- } else {
- list = rw.Read
- }
- for _, pat := range list {
- match, err := simpleMatch(pat, key)
- if err == nil && match {
- return true
- }
- }
- return false
-}
-
-func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool {
- list := rw.Read
- if write {
- list = rw.Write
- }
- for _, pat := range list {
- match, err := prefixMatch(pat, key)
- if err == nil && match {
- return true
- }
- }
- return false
-}
-
-func simpleMatch(pattern string, key string) (match bool, err error) {
- if pattern[len(pattern)-1] == '*' {
- return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
- }
- return key == pattern, nil
-}
-
-func prefixMatch(pattern string, key string) (match bool, err error) {
- if pattern[len(pattern)-1] != '*' {
- return false, nil
- }
- return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
-}
-
-func attachRootRole(u User) User {
- inRoles := false
- for _, r := range u.Roles {
- if r == RootRoleName {
- inRoles = true
- break
- }
- }
- if !inRoles {
- u.Roles = append(u.Roles, RootRoleName)
- }
- return u
-}
-
-func (s *store) getUser(name string, quorum bool) (User, error) {
- resp, err := s.requestResource("/users/"+name, false, quorum)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
- }
- }
- return User{}, err
- }
- var u User
- err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
- if err != nil {
- return u, err
- }
- // Attach root role to root user.
- if u.User == "root" {
- u = attachRootRole(u)
- }
- return u, nil
-}
-
-func (s *store) getRole(name string, quorum bool) (Role, error) {
- if name == RootRoleName {
- return rootRole, nil
- }
- resp, err := s.requestResource("/roles/"+name, false, quorum)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
- }
- }
- return Role{}, err
- }
- var r Role
- err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
- return r, err
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
deleted file mode 100644
index 2464828..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
- "encoding/json"
- "path"
-
- etcderr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-func (s *store) ensureAuthDirectories() error {
- if s.ensuredOnce {
- return nil
- }
- for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- pe := false
- rr := etcdserverpb.Request{
- Method: "PUT",
- Path: res,
- Dir: true,
- PrevExist: &pe,
- }
- _, err := s.server.Do(ctx, rr)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- continue
- }
- }
- plog.Errorf("failed to create auth directories in the store (%v)", err)
- return err
- }
- }
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- pe := false
- rr := etcdserverpb.Request{
- Method: "PUT",
- Path: StorePermsPrefix + "/enabled",
- Val: "false",
- PrevExist: &pe,
- }
- _, err := s.server.Do(ctx, rr)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- s.ensuredOnce = true
- return nil
- }
- }
- return err
- }
- s.ensuredOnce = true
- return nil
-}
-
-func (s *store) enableAuth() error {
- _, err := s.updateResource("/enabled", true)
- return err
-}
-func (s *store) disableAuth() error {
- _, err := s.updateResource("/enabled", false)
- return err
-}
-
-func (s *store) detectAuth() bool {
- if s.server == nil {
- return false
- }
- value, err := s.requestResource("/enabled", false, false)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return false
- }
- }
- plog.Errorf("failed to detect auth settings (%s)", err)
- return false
- }
-
- var u bool
- err = json.Unmarshal([]byte(*value.Event.Node.Value), &u)
- if err != nil {
- plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err)
- return false
- }
- return u
-}
-
-func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- p := path.Join(StorePermsPrefix, res)
- method := "GET"
- if quorum {
- method = "QGET"
- }
- rr := etcdserverpb.Request{
- Method: method,
- Path: p,
- Dir: dir,
- }
- return s.server.Do(ctx, rr)
-}
-
-func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
- return s.setResource(res, value, true)
-}
-func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
- return s.setResource(res, value, false)
-}
-func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
- err := s.ensureAuthDirectories()
- if err != nil {
- return etcdserver.Response{}, err
- }
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- data, err := json.Marshal(value)
- if err != nil {
- return etcdserver.Response{}, err
- }
- p := path.Join(StorePermsPrefix, res)
- rr := etcdserverpb.Request{
- Method: "PUT",
- Path: p,
- Val: string(data),
- PrevExist: &prevexist,
- }
- return s.server.Do(ctx, rr)
-}
-
-func (s *store) deleteResource(res string) (etcdserver.Response, error) {
- err := s.ensureAuthDirectories()
- if err != nil {
- return etcdserver.Response{}, err
- }
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- pex := true
- p := path.Join(StorePermsPrefix, res)
- rr := etcdserverpb.Request{
- Method: "DELETE",
- Path: p,
- PrevExist: &pex,
- }
- return s.server.Do(ctx, rr)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go
deleted file mode 100644
index 647773d..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/backend.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
-)
-
-func newBackend(cfg ServerConfig) backend.Backend {
- bcfg := backend.DefaultBackendConfig()
- bcfg.Path = cfg.backendPath()
- if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
- // permit 10% excess over quota for disarm
- bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
- }
- return backend.New(bcfg)
-}
-
-// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
-func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
- snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
- if err != nil {
- return nil, fmt.Errorf("database snapshot file path error: %v", err)
- }
- if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
- return nil, fmt.Errorf("rename snapshot file error: %v", err)
- }
- return openBackend(cfg), nil
-}
-
-// openBackend returns a backend using the current etcd db.
-func openBackend(cfg ServerConfig) backend.Backend {
- fn := cfg.backendPath()
- beOpened := make(chan backend.Backend)
- go func() {
- beOpened <- newBackend(cfg)
- }()
- select {
- case be := <-beOpened:
- return be
- case <-time.After(10 * time.Second):
- plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn)
- plog.Warningf("waiting for it to exit before starting...")
- }
- return <-beOpened
-}
-
-// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes
-// before updating the backend db after persisting raft snapshot to disk,
-// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
-// case, replace the db with the snapshot db sent by the leader.
-func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
- var cIndex consistentIndex
- kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
- defer kv.Close()
- if snapshot.Metadata.Index <= kv.ConsistentIndex() {
- return oldbe, nil
- }
- oldbe.Close()
- return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
-}
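
The removed openBackend opens bolt in a separate goroutine and only warns if the open has not completed within 10 seconds (typically because another process holds the file lock), then keeps waiting. A small stand-alone sketch of the same pattern, with a placeholder open function instead of backend.New:

```go
package main

import (
	"fmt"
	"time"
)

// openSlowResource stands in for backend.New: opening the backend can block
// while another process holds the file lock.
func openSlowResource(path string) string {
	time.Sleep(50 * time.Millisecond) // simulate a slow open
	return "backend(" + path + ")"
}

// openWithWarning mirrors openBackend: start the open in a goroutine, warn
// once if it has not completed within the deadline, then keep waiting.
func openWithWarning(path string, warnAfter time.Duration) string {
	opened := make(chan string, 1)
	go func() { opened <- openSlowResource(path) }()
	select {
	case be := <-opened:
		return be
	case <-time.After(warnAfter):
		fmt.Printf("opening %q is taking longer than %v; waiting...\n", path, warnAfter)
	}
	return <-opened
}

func main() {
	fmt.Println(openWithWarning("member/snap/db", 10*time.Millisecond))
}
```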
diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
deleted file mode 100644
index f44862a..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "sort"
- "time"
-
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/version"
- "github.com/coreos/go-semver/semver"
-)
-
-// isMemberBootstrapped tries to check if the given member has been bootstrapped
-// in the given cluster.
-func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
- rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
- if err != nil {
- return false
- }
- id := cl.MemberByName(member).ID
- m := rcl.Member(id)
- if m == nil {
- return false
- }
- if len(m.ClientURLs) > 0 {
- return true
- }
- return false
-}
-
-// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
-// attempts to construct a Cluster by accessing the members endpoint on one of
-// these URLs. The first URL to provide a response is used. If no URLs provide
-// a response, or a Cluster cannot be successfully created from a received
-// response, an error is returned.
-// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
-// 10 seconds is enough for building the connection and finishing the request.
-func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
- return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
-}
-
-// If logerr is true, it prints out more error messages.
-func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
- cc := &http.Client{
- Transport: rt,
- Timeout: timeout,
- }
- for _, u := range urls {
- resp, err := cc.Get(u + "/members")
- if err != nil {
- if logerr {
- plog.Warningf("could not get cluster response from %s: %v", u, err)
- }
- continue
- }
- b, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- if logerr {
- plog.Warningf("could not read the body of cluster response: %v", err)
- }
- continue
- }
- var membs []*membership.Member
- if err = json.Unmarshal(b, &membs); err != nil {
- if logerr {
- plog.Warningf("could not unmarshal cluster response: %v", err)
- }
- continue
- }
- id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
- if err != nil {
- if logerr {
- plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
- }
- continue
- }
-
-		// Check whether any members were returned: if so, build and return the
-		// raft cluster from them. Otherwise the resulting cluster would be an
-		// invalid, empty cluster, so return an error instead.
- if len(membs) > 0 {
- return membership.NewClusterFromMembers("", id, membs), nil
- }
-
- return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
- }
- return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
-}
-
-// getRemotePeerURLs returns peer urls of remote members in the cluster. The
-// returned list is sorted in ascending lexicographical order.
-func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
- us := make([]string, 0)
- for _, m := range cl.Members() {
- if m.Name == local {
- continue
- }
- us = append(us, m.PeerURLs...)
- }
- sort.Strings(us)
- return us
-}
-
-// getVersions returns the versions of the members in the given cluster.
-// The key of the returned map is the member's ID. The value of the returned map
-// is the semver versions string, including server and cluster.
-// If it fails to get the version of a member, the value will be nil.
-func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
- members := cl.Members()
- vers := make(map[string]*version.Versions)
- for _, m := range members {
- if m.ID == local {
- cv := "not_decided"
- if cl.Version() != nil {
- cv = cl.Version().String()
- }
- vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
- continue
- }
- ver, err := getVersion(m, rt)
- if err != nil {
- plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
- vers[m.ID.String()] = nil
- } else {
- vers[m.ID.String()] = ver
- }
- }
- return vers
-}
-
-// decideClusterVersion decides the cluster version based on the versions map.
-// The returned version is the min server version in the map, or nil if the min
-// version is unknown.
-func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
- var cv *semver.Version
- lv := semver.Must(semver.NewVersion(version.Version))
-
- for mid, ver := range vers {
- if ver == nil {
- return nil
- }
- v, err := semver.NewVersion(ver.Server)
- if err != nil {
- plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
- return nil
- }
- if lv.LessThan(*v) {
- plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
- plog.Warningf("member %s has a higher version %s", mid, ver.Server)
- }
- if cv == nil {
- cv = v
- } else if v.LessThan(*cv) {
- cv = v
- }
- }
- return cv
-}
-
-// isCompatibleWithCluster returns true if the local member has a compatible version with
-// the currently running cluster.
-// The version is considered compatible when at least one of the other members in the cluster has a
-// cluster version in the range of [MinClusterVersion, Version] and no known member has a cluster version
-// out of the range.
-// We set this rule since when the local member joins, another member might be offline.
-func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
- vers := getVersions(cl, local, rt)
- minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
- maxV := semver.Must(semver.NewVersion(version.Version))
- maxV = &semver.Version{
- Major: maxV.Major,
- Minor: maxV.Minor,
- }
-
- return isCompatibleWithVers(vers, local, minV, maxV)
-}
-
-func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
- var ok bool
- for id, v := range vers {
- // ignore comparison with local version
- if id == local.String() {
- continue
- }
- if v == nil {
- continue
- }
- clusterv, err := semver.NewVersion(v.Cluster)
- if err != nil {
- plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
- continue
- }
- if clusterv.LessThan(*minV) {
- plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
- return false
- }
- if maxV.LessThan(*clusterv) {
- plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
- return false
- }
- ok = true
- }
- return ok
-}
-
-// getVersion returns the Versions of the given member via its
-// peerURLs. Returns the last error if it fails to get the version.
-func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
- cc := &http.Client{
- Transport: rt,
- }
- var (
- err error
- resp *http.Response
- )
-
- for _, u := range m.PeerURLs {
- resp, err = cc.Get(u + "/version")
- if err != nil {
- plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
- continue
- }
- var b []byte
- b, err = ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
- continue
- }
- var vers version.Versions
- if err = json.Unmarshal(b, &vers); err != nil {
- plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
- continue
- }
- return &vers, nil
- }
- return nil, err
-}
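
The removed decideClusterVersion picks the cluster version as the minimum server version reported by the members, or nil if any member's version is unknown or unparsable. A small sketch of that rule, assuming the github.com/coreos/go-semver/semver dependency from the vendor tree is available:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// decideMinVersion mirrors the rule in decideClusterVersion: return the
// minimum parsed version, or nil if any member's version is unknown.
func decideMinVersion(vers map[string]string) *semver.Version {
	var cv *semver.Version
	for member, s := range vers {
		if s == "" {
			return nil // a member's version is unknown
		}
		v, err := semver.NewVersion(s)
		if err != nil {
			fmt.Printf("cannot parse version of member %s: %v\n", member, err)
			return nil
		}
		if cv == nil || v.LessThan(*cv) {
			cv = v
		}
	}
	return cv
}

func main() {
	vers := map[string]string{"a": "3.3.10", "b": "3.2.26", "c": "3.3.9"}
	fmt.Println(decideMinVersion(vers)) // 3.2.26
}
```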
diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go
deleted file mode 100644
index 295d952..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/config.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "fmt"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/netutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
-)
-
-// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
-type ServerConfig struct {
- Name string
- DiscoveryURL string
- DiscoveryProxy string
- ClientURLs types.URLs
- PeerURLs types.URLs
- DataDir string
-	// DedicatedWALDir makes etcd write the WAL to WALDir
-	// rather than to dataDir/member/wal.
- DedicatedWALDir string
- SnapCount uint64
- MaxSnapFiles uint
- MaxWALFiles uint
- InitialPeerURLsMap types.URLsMap
- InitialClusterToken string
- NewCluster bool
- ForceNewCluster bool
- PeerTLSInfo transport.TLSInfo
-
- TickMs uint
- ElectionTicks int
-
-	// If InitialElectionTickAdvance is true, the local member fast-forwards
-	// its election ticks to speed up the "initial" leader election trigger.
-	// This benefits the case of larger election ticks. For instance, a
-	// cross-datacenter deployment may require an election timeout of 10 seconds.
-	// If true, the local node does not need to wait up to 10 seconds; instead,
-	// it advances its election ticks to 8 seconds and has only 2 seconds left
-	// before leader election.
- //
- // Major assumptions are that:
- // - cluster has no active leader thus advancing ticks enables faster
- // leader election, or
- // - cluster already has an established leader, and rejoining follower
- // is likely to receive heartbeats from the leader after tick advance
- // and before election timeout.
- //
-	// However, when the network from the leader to a rejoining follower is
-	// congested and the follower does not receive a leader heartbeat within the
-	// remaining election ticks, a disruptive election has to happen, affecting
-	// cluster availability.
- //
- // Disabling this would slow down initial bootstrap process for cross
- // datacenter deployments. Make your own tradeoffs by configuring
- // --initial-election-tick-advance at the cost of slow initial bootstrap.
- //
- // If single-node, it advances ticks regardless.
- //
- // See https://github.com/coreos/etcd/issues/9333 for more detail.
- InitialElectionTickAdvance bool
-
- BootstrapTimeout time.Duration
-
- AutoCompactionRetention time.Duration
- AutoCompactionMode string
- QuotaBackendBytes int64
- MaxTxnOps uint
-
- // MaxRequestBytes is the maximum request size to send over raft.
- MaxRequestBytes uint
-
- StrictReconfigCheck bool
-
- // ClientCertAuthEnabled is true when cert has been signed by the client CA.
- ClientCertAuthEnabled bool
-
- AuthToken string
-
- // InitialCorruptCheck is true to check data corruption on boot
- // before serving any peer/client traffic.
- InitialCorruptCheck bool
- CorruptCheckTime time.Duration
-
- Debug bool
-}
-
-// VerifyBootstrap sanity-checks the initial config for bootstrap case
-// and returns an error for things that should never happen.
-func (c *ServerConfig) VerifyBootstrap() error {
- if err := c.hasLocalMember(); err != nil {
- return err
- }
- if err := c.advertiseMatchesCluster(); err != nil {
- return err
- }
- if checkDuplicateURL(c.InitialPeerURLsMap) {
- return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
- }
- if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
- return fmt.Errorf("initial cluster unset and no discovery URL found")
- }
- return nil
-}
-
-// VerifyJoinExisting sanity-checks the initial config for join existing cluster
-// case and returns an error for things that should never happen.
-func (c *ServerConfig) VerifyJoinExisting() error {
- // The member has announced its peer urls to the cluster before starting; no need to
- // set the configuration again.
- if err := c.hasLocalMember(); err != nil {
- return err
- }
- if checkDuplicateURL(c.InitialPeerURLsMap) {
- return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
- }
- if c.DiscoveryURL != "" {
- return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
- }
- return nil
-}
-
-// hasLocalMember checks that the cluster at least contains the local server.
-func (c *ServerConfig) hasLocalMember() error {
- if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
- return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
- }
- return nil
-}
-
-// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
-func (c *ServerConfig) advertiseMatchesCluster() error {
- urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
- urls.Sort()
- sort.Strings(apurls)
- ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
- defer cancel()
- ok, err := netutil.URLStringsEqual(ctx, apurls, urls.StringSlice())
- if ok {
- return nil
- }
-
- initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
- for _, url := range c.PeerURLs {
- apMap[url.String()] = struct{}{}
- }
- for _, url := range c.InitialPeerURLsMap[c.Name] {
- initMap[url.String()] = struct{}{}
- }
-
- missing := []string{}
- for url := range initMap {
- if _, ok := apMap[url]; !ok {
- missing = append(missing, url)
- }
- }
- if len(missing) > 0 {
- for i := range missing {
- missing[i] = c.Name + "=" + missing[i]
- }
- mstr := strings.Join(missing, ",")
- apStr := strings.Join(apurls, ",")
- return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
- }
-
- for url := range apMap {
- if _, ok := initMap[url]; !ok {
- missing = append(missing, url)
- }
- }
- if len(missing) > 0 {
- mstr := strings.Join(missing, ",")
- umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
- return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
- }
-
- // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
- apStr := strings.Join(apurls, ",")
- umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
- return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
-}
-
-func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
-
-func (c *ServerConfig) WALDir() string {
- if c.DedicatedWALDir != "" {
- return c.DedicatedWALDir
- }
- return filepath.Join(c.MemberDir(), "wal")
-}
-
-func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
-
-func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
-
-// ReqTimeout returns timeout for request to finish.
-func (c *ServerConfig) ReqTimeout() time.Duration {
- // 5s for queue waiting, computation and disk IO delay
- // + 2 * election timeout for possible leader election
- return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
-}
-
-func (c *ServerConfig) electionTimeout() time.Duration {
- return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
-}
-
-func (c *ServerConfig) peerDialTimeout() time.Duration {
- // 1s for queue wait and election timeout
- return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
-}
-
-func (c *ServerConfig) PrintWithInitial() { c.print(true) }
-
-func (c *ServerConfig) Print() { c.print(false) }
-
-func (c *ServerConfig) print(initial bool) {
- plog.Infof("name = %s", c.Name)
- if c.ForceNewCluster {
- plog.Infof("force new cluster")
- }
- plog.Infof("data dir = %s", c.DataDir)
- plog.Infof("member dir = %s", c.MemberDir())
- if c.DedicatedWALDir != "" {
- plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
- }
- plog.Infof("heartbeat = %dms", c.TickMs)
- plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
- plog.Infof("snapshot count = %d", c.SnapCount)
- if len(c.DiscoveryURL) != 0 {
- plog.Infof("discovery URL= %s", c.DiscoveryURL)
- if len(c.DiscoveryProxy) != 0 {
- plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
- }
- }
- plog.Infof("advertise client URLs = %s", c.ClientURLs)
- if initial {
- plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
- plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
- }
-}
-
-func checkDuplicateURL(urlsmap types.URLsMap) bool {
- um := make(map[string]bool)
- for _, urls := range urlsmap {
- for _, url := range urls {
- u := url.String()
- if um[u] {
- return true
- }
- um[u] = true
- }
- }
- return false
-}
-
-func (c *ServerConfig) bootstrapTimeout() time.Duration {
- if c.BootstrapTimeout != 0 {
- return c.BootstrapTimeout
- }
- return time.Second
-}
-
-func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
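
The timeout math in the removed ReqTimeout and electionTimeout is simple arithmetic over the tick settings: the election timeout is ElectionTicks * TickMs milliseconds, and a request is given 5 seconds plus two election timeouts. A worked example using etcd's common defaults (heartbeat 100 ms, 10 election ticks), which are assumptions for illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	tickMs := uint(100)  // heartbeat interval, --heartbeat-interval
	electionTicks := 10  // election timeout expressed in heartbeat ticks

	election := time.Duration(electionTicks*int(tickMs)) * time.Millisecond
	// ReqTimeout: 5s for queueing, computation and disk I/O, plus two
	// election timeouts to cover a possible leader election.
	reqTimeout := 5*time.Second + 2*election

	fmt.Println(election)   // 1s
	fmt.Println(reqTimeout) // 7s
}
```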
diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
deleted file mode 100644
index d513f67..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "sync/atomic"
-)
-
-// consistentIndex represents the offset of an entry in a consistent replica log.
-// It implements the mvcc.ConsistentIndexGetter interface.
-// It is always set to the offset of current entry before executing the entry,
-// so ConsistentWatchableKV could get the consistent index from it.
-type consistentIndex uint64
-
-func (i *consistentIndex) setConsistentIndex(v uint64) {
- atomic.StoreUint64((*uint64)(i), v)
-}
-
-func (i *consistentIndex) ConsistentIndex() uint64 {
- return atomic.LoadUint64((*uint64)(i))
-}
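
The removed consistentIndex is nothing more than a uint64 read and written with sync/atomic so the apply loop and concurrent readers never race. A minimal stand-alone version of the same pattern:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// index mimics consistentIndex: a uint64 guarded only by atomic loads/stores.
type index uint64

func (i *index) set(v uint64) { atomic.StoreUint64((*uint64)(i), v) }
func (i *index) get() uint64  { return atomic.LoadUint64((*uint64)(i)) }

func main() {
	var ci index
	var wg sync.WaitGroup
	for v := uint64(1); v <= 100; v++ {
		wg.Add(1)
		go func(v uint64) { defer wg.Done(); ci.set(v) }(v)
	}
	wg.Wait()
	fmt.Println(ci.get() >= 1 && ci.get() <= 100) // true
}
```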
diff --git a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go
deleted file mode 100644
index d998ec5..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/pkg/types"
-)
-
-// CheckInitialHashKV compares initial hash values with its peers
-// before serving any peer/client traffic. Only mismatch when hashes
-// are different at requested revision, with same compact revision.
-func (s *EtcdServer) CheckInitialHashKV() error {
- if !s.Cfg.InitialCorruptCheck {
- return nil
- }
-
- plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
- h, rev, crev, err := s.kv.HashByRev(0)
- if err != nil {
- return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
- }
- peers := s.getPeerHashKVs(rev)
- mismatch := 0
- for _, p := range peers {
- if p.resp != nil {
- peerID := types.ID(p.resp.Header.MemberId)
- if h != p.resp.Hash {
- if crev == p.resp.CompactRevision {
- plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
- mismatch++
- } else {
- plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
- }
- }
- continue
- }
- if p.err != nil {
- switch p.err {
- case rpctypes.ErrFutureRev:
- plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
- case rpctypes.ErrCompacted:
- plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
- }
- }
- }
- if mismatch > 0 {
- return fmt.Errorf("%s found data inconsistency with peers", s.ID())
- }
-
- plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
- return nil
-}
-
-func (s *EtcdServer) monitorKVHash() {
- t := s.Cfg.CorruptCheckTime
- if t == 0 {
- return
- }
- plog.Infof("enabled corruption checking with %s interval", t)
- for {
- select {
- case <-s.stopping:
- return
- case <-time.After(t):
- }
- if !s.isLeader() {
- continue
- }
- if err := s.checkHashKV(); err != nil {
- plog.Debugf("check hash kv failed %v", err)
- }
- }
-}
-
-func (s *EtcdServer) checkHashKV() error {
- h, rev, crev, err := s.kv.HashByRev(0)
- if err != nil {
- plog.Fatalf("failed to hash kv store (%v)", err)
- }
- peers := s.getPeerHashKVs(rev)
-
- ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- err = s.linearizableReadNotify(ctx)
- cancel()
- if err != nil {
- return err
- }
-
- h2, rev2, crev2, err := s.kv.HashByRev(0)
- if err != nil {
- plog.Warningf("failed to hash kv store (%v)", err)
- return err
- }
-
- alarmed := false
- mismatch := func(id uint64) {
- if alarmed {
- return
- }
- alarmed = true
- a := &pb.AlarmRequest{
- MemberID: uint64(id),
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_CORRUPT,
- }
- s.goAttach(func() {
- s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
- })
- }
-
- if h2 != h && rev2 == rev && crev == crev2 {
- plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
- mismatch(uint64(s.ID()))
- }
-
- for _, p := range peers {
- if p.resp == nil {
- continue
- }
- id := p.resp.Header.MemberId
-
- // leader expects follower's latest revision less than or equal to leader's
- if p.resp.Header.Revision > rev2 {
- plog.Warningf(
- "revision %d from member %v, expected at most %d",
- p.resp.Header.Revision,
- types.ID(id),
- rev2)
- mismatch(id)
- }
-
- // leader expects follower's latest compact revision less than or equal to leader's
- if p.resp.CompactRevision > crev2 {
- plog.Warningf(
- "compact revision %d from member %v, expected at most %d",
- p.resp.CompactRevision,
- types.ID(id),
- crev2,
- )
- mismatch(id)
- }
-
-		// if the follower's compact revision equals the leader's previous one, then the hashes must match
- if p.resp.CompactRevision == crev && p.resp.Hash != h {
- plog.Warningf(
- "hash %d at revision %d from member %v, expected hash %d",
- p.resp.Hash,
- rev,
- types.ID(id),
- h,
- )
- mismatch(id)
- }
- }
- return nil
-}
-
-type peerHashKVResp struct {
- resp *clientv3.HashKVResponse
- err error
- eps []string
-}
-
-func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
-	// TODO: handle the case when "s.cluster.Members" has not
-	// been populated (e.g. no snapshot to load from disk)
- mbs := s.cluster.Members()
- pURLs := make([][]string, len(mbs))
- for _, m := range mbs {
- if m.ID == s.ID() {
- continue
- }
- pURLs = append(pURLs, m.PeerURLs)
- }
-
- for _, purls := range pURLs {
- if len(purls) == 0 {
- continue
- }
- cli, cerr := clientv3.New(clientv3.Config{
- DialTimeout: s.Cfg.ReqTimeout(),
- Endpoints: purls,
- })
- if cerr != nil {
- plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error())
- continue
- }
-
- respsLen := len(resps)
- for _, c := range cli.Endpoints() {
- ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- var resp *clientv3.HashKVResponse
- resp, cerr = cli.HashKV(ctx, c, rev)
- cancel()
- if cerr == nil {
- resps = append(resps, &peerHashKVResp{resp: resp})
- break
- }
- plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
- }
- cli.Close()
-
- if respsLen == len(resps) {
- resps = append(resps, &peerHashKVResp{err: cerr, eps: purls})
- }
- }
- return resps
-}
-
-type applierV3Corrupt struct {
- applierV3
-}
-
-func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
-
-func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Range(txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
- return nil, nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- return nil, ErrCorrupt
-}
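
The removed corruption checker only treats two members as inconsistent when their KV hashes differ at the same compact revision; a differing compact revision just makes the comparison inconclusive. A small sketch of that decision rule with assumed, hypothetical field names:

```go
package main

import "fmt"

// hashReport captures the fields the corruption check compares: a KV hash
// plus the compact revision it was computed against.
type hashReport struct {
	hash            uint32
	compactRevision int64
}

// mismatch mirrors the rule in CheckInitialHashKV: hashes must differ at the
// same compact revision to count as a real inconsistency.
func mismatch(local, peer hashReport) (bool, string) {
	if local.compactRevision != peer.compactRevision {
		return false, "different compact revision; cannot compare"
	}
	if local.hash != peer.hash {
		return true, "hash mismatch at same compact revision"
	}
	return false, "hashes match"
}

func main() {
	local := hashReport{hash: 0xabcd, compactRevision: 42}
	fmt.Println(mismatch(local, hashReport{hash: 0xabcd, compactRevision: 42}))
	fmt.Println(mismatch(local, hashReport{hash: 0x1234, compactRevision: 42}))
	fmt.Println(mismatch(local, hashReport{hash: 0x1234, compactRevision: 7}))
}
```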
diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/github.com/coreos/etcd/etcdserver/doc.go
deleted file mode 100644
index b195d2d..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package etcdserver defines how etcd servers interact and store their states.
-package etcdserver
diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go
deleted file mode 100644
index fb93c4b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/errors.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "errors"
- "fmt"
-)
-
-var (
- ErrUnknownMethod = errors.New("etcdserver: unknown method")
- ErrStopped = errors.New("etcdserver: server stopped")
- ErrCanceled = errors.New("etcdserver: request cancelled")
- ErrTimeout = errors.New("etcdserver: request timed out")
- ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
- ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
- ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
- ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
- ErrNoLeader = errors.New("etcdserver: no leader")
- ErrNotLeader = errors.New("etcdserver: not leader")
- ErrRequestTooLarge = errors.New("etcdserver: request is too large")
- ErrNoSpace = errors.New("etcdserver: no space")
- ErrTooManyRequests = errors.New("etcdserver: too many requests")
- ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
- ErrKeyNotFound = errors.New("etcdserver: key not found")
- ErrCorrupt = errors.New("etcdserver: corrupt cluster")
-)
-
-type DiscoveryError struct {
- Op string
- Err error
-}
-
-func (e DiscoveryError) Error() string {
- return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
-}
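
The removed errors.go exposes plain sentinel errors that callers compare against to decide how to react (for example, whether to retry). A tiny sketch with a hypothetical caller and a locally defined sentinel standing in for ErrNoLeader:

```go
package main

import (
	"errors"
	"fmt"
)

// errNoLeader stands in for the package-level sentinel ErrNoLeader.
var errNoLeader = errors.New("etcdserver: no leader")

// doRequest is a hypothetical caller that surfaces the sentinel.
func doRequest() error { return errNoLeader }

func main() {
	if err := doRequest(); errors.Is(err, errNoLeader) {
		fmt.Println("no leader yet; retry after the next election")
	}
}
```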
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
new file mode 100644
index 0000000..90045a5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
@@ -0,0 +1,1035 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: etcdserver.proto
+
+/*
+ Package etcdserverpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ etcdserver.proto
+ raft_internal.proto
+ rpc.proto
+
+ It has these top-level messages:
+ Request
+ Metadata
+ RequestHeader
+ InternalRaftRequest
+ EmptyResponse
+ InternalAuthenticateRequest
+ ResponseHeader
+ RangeRequest
+ RangeResponse
+ PutRequest
+ PutResponse
+ DeleteRangeRequest
+ DeleteRangeResponse
+ RequestOp
+ ResponseOp
+ Compare
+ TxnRequest
+ TxnResponse
+ CompactionRequest
+ CompactionResponse
+ HashRequest
+ HashKVRequest
+ HashKVResponse
+ HashResponse
+ SnapshotRequest
+ SnapshotResponse
+ WatchRequest
+ WatchCreateRequest
+ WatchCancelRequest
+ WatchResponse
+ LeaseGrantRequest
+ LeaseGrantResponse
+ LeaseRevokeRequest
+ LeaseRevokeResponse
+ LeaseKeepAliveRequest
+ LeaseKeepAliveResponse
+ LeaseTimeToLiveRequest
+ LeaseTimeToLiveResponse
+ LeaseLeasesRequest
+ LeaseStatus
+ LeaseLeasesResponse
+ Member
+ MemberAddRequest
+ MemberAddResponse
+ MemberRemoveRequest
+ MemberRemoveResponse
+ MemberUpdateRequest
+ MemberUpdateResponse
+ MemberListRequest
+ MemberListResponse
+ DefragmentRequest
+ DefragmentResponse
+ MoveLeaderRequest
+ MoveLeaderResponse
+ AlarmRequest
+ AlarmMember
+ AlarmResponse
+ StatusRequest
+ StatusResponse
+ AuthEnableRequest
+ AuthDisableRequest
+ AuthenticateRequest
+ AuthUserAddRequest
+ AuthUserGetRequest
+ AuthUserDeleteRequest
+ AuthUserChangePasswordRequest
+ AuthUserGrantRoleRequest
+ AuthUserRevokeRoleRequest
+ AuthRoleAddRequest
+ AuthRoleGetRequest
+ AuthUserListRequest
+ AuthRoleListRequest
+ AuthRoleDeleteRequest
+ AuthRoleGrantPermissionRequest
+ AuthRoleRevokePermissionRequest
+ AuthEnableResponse
+ AuthDisableResponse
+ AuthenticateResponse
+ AuthUserAddResponse
+ AuthUserGetResponse
+ AuthUserDeleteResponse
+ AuthUserChangePasswordResponse
+ AuthUserGrantRoleResponse
+ AuthUserRevokeRoleResponse
+ AuthRoleAddResponse
+ AuthRoleGetResponse
+ AuthRoleListResponse
+ AuthUserListResponse
+ AuthRoleDeleteResponse
+ AuthRoleGrantPermissionResponse
+ AuthRoleRevokePermissionResponse
+*/
+package etcdserverpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Request struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+ Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+ Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+ Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+ Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+ PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+ PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+ PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+ Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+ Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+ Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
+ Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+ Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+ Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+ Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
+ Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
+ Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
+
+type Metadata struct {
+ NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+ ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} }
+
+func init() {
+ proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
+ proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
+}
+func (m *Request) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
+ i += copy(dAtA[i:], m.Method)
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
+ i += copy(dAtA[i:], m.Path)
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
+ i += copy(dAtA[i:], m.Val)
+ dAtA[i] = 0x28
+ i++
+ if m.Dir {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
+ i += copy(dAtA[i:], m.PrevValue)
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
+ if m.PrevExist != nil {
+ dAtA[i] = 0x40
+ i++
+ if *m.PrevExist {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ dAtA[i] = 0x48
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
+ dAtA[i] = 0x50
+ i++
+ if m.Wait {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x58
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
+ dAtA[i] = 0x60
+ i++
+ if m.Recursive {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x68
+ i++
+ if m.Sorted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x70
+ i++
+ if m.Quorum {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x78
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
+ dAtA[i] = 0x80
+ i++
+ dAtA[i] = 0x1
+ i++
+ if m.Stream {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ if m.Refresh != nil {
+ dAtA[i] = 0x88
+ i++
+ dAtA[i] = 0x1
+ i++
+ if *m.Refresh {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Request) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovEtcdserver(uint64(m.ID))
+ l = len(m.Method)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ l = len(m.Val)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ n += 2
+ l = len(m.PrevValue)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ n += 1 + sovEtcdserver(uint64(m.PrevIndex))
+ if m.PrevExist != nil {
+ n += 2
+ }
+ n += 1 + sovEtcdserver(uint64(m.Expiration))
+ n += 2
+ n += 1 + sovEtcdserver(uint64(m.Since))
+ n += 2
+ n += 2
+ n += 2
+ n += 1 + sovEtcdserver(uint64(m.Time))
+ n += 3
+ if m.Refresh != nil {
+ n += 3
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Metadata) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovEtcdserver(uint64(m.NodeID))
+ n += 1 + sovEtcdserver(uint64(m.ClusterID))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovEtcdserver(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozEtcdserver(x uint64) (n int) {
+ return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Request: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Method = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Val = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Dir = bool(v != 0)
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PrevValue = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
+ }
+ m.PrevIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PrevIndex |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.PrevExist = &b
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+ }
+ m.Expiration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Expiration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Wait = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
+ }
+ m.Since = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Since |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Recursive = bool(v != 0)
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sorted = bool(v != 0)
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Quorum = bool(v != 0)
+ case 15:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+ }
+ m.Time = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Time |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 16:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stream = bool(v != 0)
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Refresh = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEtcdserver(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NodeID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+ }
+ m.ClusterID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ClusterID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEtcdserver(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipEtcdserver(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthEtcdserver
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipEtcdserver(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }
+
+var fileDescriptorEtcdserver = []byte{
+ // 380 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
+ 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
+ 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
+ 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
+ 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
+ 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
+ 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
+ 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
+ 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
+ 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
+ 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
+ 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
+ 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
+ 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
+ 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
+ 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
+ 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
+ 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
+ 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
+ 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
+ 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
+ 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
+ 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
+ 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
deleted file mode 100644
index c50525b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
+++ /dev/null
@@ -1,2134 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: etcdserver/etcdserverpb/rpc.proto
-
-/*
-Package etcdserverpb is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package gw
-
-import (
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.RangeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.PutRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DeleteRangeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.TxnRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.CompactionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.Watch(ctx)
- if err != nil {
- grpclog.Printf("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- dec := marshaler.NewDecoder(req.Body)
- handleSend := func() error {
- var protoReq etcdserverpb.WatchRequest
- err = dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Printf("Failed to decode request: %v", err)
- return err
- }
- if err = stream.Send(&protoReq); err != nil {
- grpclog.Printf("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Printf("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Printf("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Printf("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
-
-func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseGrantRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseRevokeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.LeaseKeepAlive(ctx)
- if err != nil {
- grpclog.Printf("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- dec := marshaler.NewDecoder(req.Body)
- handleSend := func() error {
- var protoReq etcdserverpb.LeaseKeepAliveRequest
- err = dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Printf("Failed to decode request: %v", err)
- return err
- }
- if err = stream.Send(&protoReq); err != nil {
- grpclog.Printf("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Printf("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Printf("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Printf("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
-
-func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseTimeToLiveRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseLeasesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberAddRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberRemoveRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberUpdateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberListRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AlarmRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.StatusRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DefragmentRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashKVRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.SnapshotRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- stream, err := client.Snapshot(ctx, &protoReq)
- if err != nil {
- return nil, metadata, err
- }
- header, err := stream.Header()
- if err != nil {
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-
-}
-
-func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MoveLeaderRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthEnableRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthDisableRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthenticateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserAddRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGetRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserListRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserDeleteRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserChangePasswordRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGrantRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserRevokeRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleAddRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGetRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleListRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleDeleteRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGrantPermissionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleRevokePermissionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterKVHandler(ctx, mux, conn)
-}
-
-// RegisterKVHandler registers the http handlers for service KV to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
-}
-
-// RegisterKVHandler registers the http handlers for service KV to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "KVClient" to call the correct interceptors.
-func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
-
- mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "range"}, ""))
-
- pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "put"}, ""))
-
- pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "deleterange"}, ""))
-
- pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "txn"}, ""))
-
- pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "compaction"}, ""))
-)
-
-var (
- forward_KV_Range_0 = runtime.ForwardResponseMessage
-
- forward_KV_Put_0 = runtime.ForwardResponseMessage
-
- forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage
-
- forward_KV_Txn_0 = runtime.ForwardResponseMessage
-
- forward_KV_Compact_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterWatchHandler(ctx, mux, conn)
-}
-
-// RegisterWatchHandler registers the http handlers for service Watch to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
-}
-
-// RegisterWatchHandler registers the http handlers for service Watch to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "WatchClient" to call the correct interceptors.
-func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
-
- mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3beta", "watch"}, ""))
-)
-
-var (
- forward_Watch_Watch_0 = runtime.ForwardResponseStream
-)
-
-// RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterLeaseHandler(ctx, mux, conn)
-}
-
-// RegisterLeaseHandler registers the http handlers for service Lease to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
-}
-
-// RegisterLeaseHandler registers the http handlers for service Lease to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LeaseClient" to call the correct interceptors.
-func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
-
- mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "grant"}, ""))
-
- pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "revoke"}, ""))
-
- pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "keepalive"}, ""))
-
- pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "timetolive"}, ""))
-
- pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "leases"}, ""))
-)
-
-var (
- forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
-
- forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterClusterHandler(ctx, mux, conn)
-}
-
-// RegisterClusterHandler registers the http handlers for service Cluster to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
-}
-
-// RegisterClusterHandler registers the http handlers for service Cluster to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ClusterClient" to call the correct interceptors.
-func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
-
- mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "add"}, ""))
-
- pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "remove"}, ""))
-
- pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "update"}, ""))
-
- pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "list"}, ""))
-)
-
-var (
- forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterMaintenanceHandler(ctx, mux, conn)
-}
-
-// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn))
-}
-
-// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "MaintenanceClient" to call the correct interceptors.
-func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
-
- mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_HashKV_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "alarm"}, ""))
-
- pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "status"}, ""))
-
- pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "defragment"}, ""))
-
- pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, ""))
-
- pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, ""))
-
- pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "snapshot"}, ""))
-
- pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "transfer-leadership"}, ""))
-)
-
-var (
- forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
-
- forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterAuthHandler(ctx, mux, conn)
-}
-
-// RegisterAuthHandler registers the http handlers for service Auth to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn))
-}
-
-// RegisterAuthHandler registers the http handlers for service Auth to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "AuthClient" to call the correct interceptors.
-func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
-
- mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "enable"}, ""))
-
- pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "disable"}, ""))
-
- pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "authenticate"}, ""))
-
- pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "add"}, ""))
-
- pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "get"}, ""))
-
- pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "list"}, ""))
-
- pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "delete"}, ""))
-
- pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "changepw"}, ""))
-
- pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "grant"}, ""))
-
- pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "revoke"}, ""))
-
- pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "add"}, ""))
-
- pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "get"}, ""))
-
- pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "list"}, ""))
-
- pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "delete"}, ""))
-
- pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "grant"}, ""))
-
- pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "revoke"}, ""))
-)
-
-var (
- forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
-
- forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
-
- forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserList_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
new file mode 100644
index 0000000..3084c6c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
@@ -0,0 +1,2077 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft_internal.proto
+
+package etcdserverpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RequestHeader struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // username is a username that is associated with an auth token of gRPC connection
+ Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
+ // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+ AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
+}
+
+func (m *RequestHeader) Reset() { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage() {}
+func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} }
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+type InternalRaftRequest struct {
+ Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"`
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"`
+ Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
+ Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"`
+ DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"`
+ Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"`
+ Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"`
+ LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"`
+ LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"`
+ Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"`
+ AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"`
+ AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"`
+ Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"`
+ AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"`
+ AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"`
+ AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"`
+ AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"`
+ AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"`
+ AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"`
+ AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"`
+ AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"`
+ AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"`
+ AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"`
+ AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"`
+ AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"`
+ AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"`
+}
+
+func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage() {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} }
+
+type EmptyResponse struct {
+}
+
+func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
+func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
+func (*EmptyResponse) ProtoMessage() {}
+func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} }
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// For avoiding misusage the field, we have an internal version of AuthenticateRequest.
+type InternalAuthenticateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ // simple_token is generated in API layer (etcdserver/v3_server.go)
+ SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
+}
+
+func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} }
+func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalAuthenticateRequest) ProtoMessage() {}
+func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRaftInternal, []int{3}
+}
+
+func init() {
+ proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
+ proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
+ proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
+ proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
+}
+func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+ }
+ if len(m.Username) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
+ i += copy(dAtA[i:], m.Username)
+ }
+ if m.AuthRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
+ }
+ return i, nil
+}
+
+func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+ }
+ if m.V2 != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size()))
+ n1, err := m.V2.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.Range != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size()))
+ n2, err := m.Range.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if m.Put != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size()))
+ n3, err := m.Put.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ if m.DeleteRange != nil {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size()))
+ n4, err := m.DeleteRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ if m.Txn != nil {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size()))
+ n5, err := m.Txn.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ if m.Compaction != nil {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size()))
+ n6, err := m.Compaction.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ if m.LeaseGrant != nil {
+ dAtA[i] = 0x42
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size()))
+ n7, err := m.LeaseGrant.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ if m.LeaseRevoke != nil {
+ dAtA[i] = 0x4a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size()))
+ n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ if m.Alarm != nil {
+ dAtA[i] = 0x52
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size()))
+ n9, err := m.Alarm.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ if m.Header != nil {
+ dAtA[i] = 0xa2
+ i++
+ dAtA[i] = 0x6
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size()))
+ n10, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ if m.AuthEnable != nil {
+ dAtA[i] = 0xc2
+ i++
+ dAtA[i] = 0x3e
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size()))
+ n11, err := m.AuthEnable.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ }
+ if m.AuthDisable != nil {
+ dAtA[i] = 0x9a
+ i++
+ dAtA[i] = 0x3f
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size()))
+ n12, err := m.AuthDisable.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ if m.Authenticate != nil {
+ dAtA[i] = 0xa2
+ i++
+ dAtA[i] = 0x3f
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size()))
+ n13, err := m.Authenticate.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ if m.AuthUserAdd != nil {
+ dAtA[i] = 0xe2
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size()))
+ n14, err := m.AuthUserAdd.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ if m.AuthUserDelete != nil {
+ dAtA[i] = 0xea
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size()))
+ n15, err := m.AuthUserDelete.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ }
+ if m.AuthUserGet != nil {
+ dAtA[i] = 0xf2
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size()))
+ n16, err := m.AuthUserGet.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ if m.AuthUserChangePassword != nil {
+ dAtA[i] = 0xfa
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size()))
+ n17, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ if m.AuthUserGrantRole != nil {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size()))
+ n18, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ if m.AuthUserRevokeRole != nil {
+ dAtA[i] = 0x8a
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size()))
+ n19, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ }
+ if m.AuthUserList != nil {
+ dAtA[i] = 0x92
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size()))
+ n20, err := m.AuthUserList.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ if m.AuthRoleList != nil {
+ dAtA[i] = 0x9a
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size()))
+ n21, err := m.AuthRoleList.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ }
+ if m.AuthRoleAdd != nil {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size()))
+ n22, err := m.AuthRoleAdd.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ }
+ if m.AuthRoleDelete != nil {
+ dAtA[i] = 0x8a
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size()))
+ n23, err := m.AuthRoleDelete.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ }
+ if m.AuthRoleGet != nil {
+ dAtA[i] = 0x92
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size()))
+ n24, err := m.AuthRoleGet.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ }
+ if m.AuthRoleGrantPermission != nil {
+ dAtA[i] = 0x9a
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size()))
+ n25, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n25
+ }
+ if m.AuthRoleRevokePermission != nil {
+ dAtA[i] = 0xa2
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size()))
+ n26, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ }
+ return i, nil
+}
+
+func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ if len(m.SimpleToken) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
+ i += copy(dAtA[i:], m.SimpleToken)
+ }
+ return i, nil
+}
+
+func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *RequestHeader) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaftInternal(uint64(m.ID))
+ }
+ l = len(m.Username)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRevision != 0 {
+ n += 1 + sovRaftInternal(uint64(m.AuthRevision))
+ }
+ return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaftInternal(uint64(m.ID))
+ }
+ if m.V2 != nil {
+ l = m.V2.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Range != nil {
+ l = m.Range.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Put != nil {
+ l = m.Put.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.DeleteRange != nil {
+ l = m.DeleteRange.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Txn != nil {
+ l = m.Txn.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Compaction != nil {
+ l = m.Compaction.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseGrant != nil {
+ l = m.LeaseGrant.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseRevoke != nil {
+ l = m.LeaseRevoke.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Alarm != nil {
+ l = m.Alarm.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthEnable != nil {
+ l = m.AuthEnable.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthDisable != nil {
+ l = m.AuthDisable.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Authenticate != nil {
+ l = m.Authenticate.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserAdd != nil {
+ l = m.AuthUserAdd.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserDelete != nil {
+ l = m.AuthUserDelete.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserGet != nil {
+ l = m.AuthUserGet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserChangePassword != nil {
+ l = m.AuthUserChangePassword.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserGrantRole != nil {
+ l = m.AuthUserGrantRole.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserRevokeRole != nil {
+ l = m.AuthUserRevokeRole.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserList != nil {
+ l = m.AuthUserList.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleList != nil {
+ l = m.AuthRoleList.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleAdd != nil {
+ l = m.AuthRoleAdd.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleDelete != nil {
+ l = m.AuthRoleDelete.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleGet != nil {
+ l = m.AuthRoleGet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleGrantPermission != nil {
+ l = m.AuthRoleGrantPermission.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleRevokePermission != nil {
+ l = m.AuthRoleRevokePermission.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ return n
+}
+
+func (m *EmptyResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *InternalAuthenticateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ l = len(m.SimpleToken)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ return n
+}
+
+func sovRaftInternal(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRaftInternal(x uint64) (n int) {
+ return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RequestHeader) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Username = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+ }
+ m.AuthRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AuthRevision |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.V2 == nil {
+ m.V2 = &Request{}
+ }
+ if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Range == nil {
+ m.Range = &RangeRequest{}
+ }
+ if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Put == nil {
+ m.Put = &PutRequest{}
+ }
+ if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DeleteRange == nil {
+ m.DeleteRange = &DeleteRangeRequest{}
+ }
+ if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Txn == nil {
+ m.Txn = &TxnRequest{}
+ }
+ if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Compaction == nil {
+ m.Compaction = &CompactionRequest{}
+ }
+ if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseGrant == nil {
+ m.LeaseGrant = &LeaseGrantRequest{}
+ }
+ if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseRevoke == nil {
+ m.LeaseRevoke = &LeaseRevokeRequest{}
+ }
+ if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alarm == nil {
+ m.Alarm = &AlarmRequest{}
+ }
+ if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 100:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &RequestHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1000:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthEnable == nil {
+ m.AuthEnable = &AuthEnableRequest{}
+ }
+ if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1011:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthDisable == nil {
+ m.AuthDisable = &AuthDisableRequest{}
+ }
+ if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1012:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Authenticate == nil {
+ m.Authenticate = &InternalAuthenticateRequest{}
+ }
+ if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1100:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserAdd == nil {
+ m.AuthUserAdd = &AuthUserAddRequest{}
+ }
+ if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1101:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserDelete == nil {
+ m.AuthUserDelete = &AuthUserDeleteRequest{}
+ }
+ if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1102:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserGet == nil {
+ m.AuthUserGet = &AuthUserGetRequest{}
+ }
+ if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1103:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserChangePassword == nil {
+ m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
+ }
+ if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1104:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserGrantRole == nil {
+ m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
+ }
+ if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1105:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserRevokeRole == nil {
+ m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
+ }
+ if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1106:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserList == nil {
+ m.AuthUserList = &AuthUserListRequest{}
+ }
+ if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1107:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleList == nil {
+ m.AuthRoleList = &AuthRoleListRequest{}
+ }
+ if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1200:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleAdd == nil {
+ m.AuthRoleAdd = &AuthRoleAddRequest{}
+ }
+ if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1201:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleDelete == nil {
+ m.AuthRoleDelete = &AuthRoleDeleteRequest{}
+ }
+ if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1202:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleGet == nil {
+ m.AuthRoleGet = &AuthRoleGetRequest{}
+ }
+ if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1203:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleGrantPermission == nil {
+ m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
+ }
+ if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1204:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleRevokePermission == nil {
+ m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
+ }
+ if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SimpleToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRaftInternal(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRaftInternal
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRaftInternal(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
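
The skip and Unmarshal functions above all use the same base-128 varint loop: each byte contributes its low seven bits, and a set high bit means another byte follows. A minimal standalone sketch of that loop, for reference only (editor's illustration, not part of the generated file; decodeVarint is a hypothetical helper):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint from dAtA and returns the value and
// the number of bytes consumed, mirroring the loops in the generated code.
func decodeVarint(dAtA []byte) (uint64, int, error) {
	var v uint64
	iNdEx := 0
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows a 64-bit integer")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("unexpected end of input")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C from the first byte, 0x02<<7 from the second.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```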
+
+var (
+ ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) }
+
+var fileDescriptorRaftInternal = []byte{
+ // 837 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40,
+ 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f,
+ 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c,
+ 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2,
+ 0x0f, 0xd4, 0x17, 0x50, 0xbc, 0xf1, 0xca, 0x1b, 0x7d, 0x00, 0x67, 0x3f, 0x92, 0x34, 0x6d, 0xca,
+ 0x5d, 0x72, 0xce, 0xff, 0xfc, 0xce, 0xd9, 0xec, 0x7f, 0xbb, 0x45, 0xb3, 0x8c, 0x1e, 0x72, 0xd3,
+ 0x76, 0x39, 0x30, 0x97, 0x3a, 0xab, 0x3e, 0xf3, 0xb8, 0x87, 0x0b, 0xc0, 0x1b, 0x56, 0x00, 0xac,
+ 0x0b, 0xcc, 0x3f, 0x28, 0xcd, 0x35, 0xbd, 0xa6, 0x27, 0x13, 0xf7, 0xc4, 0x93, 0xd2, 0x94, 0x66,
+ 0x62, 0x8d, 0x8e, 0xe4, 0x98, 0xdf, 0x50, 0x8f, 0x95, 0x67, 0x68, 0xda, 0x80, 0x17, 0x1d, 0x08,
+ 0xf8, 0x16, 0x50, 0x0b, 0x18, 0x2e, 0xa2, 0xec, 0x76, 0x9d, 0x64, 0x16, 0x33, 0x2b, 0x63, 0x46,
+ 0x76, 0xbb, 0x8e, 0x4b, 0x68, 0xaa, 0x13, 0x88, 0x96, 0x6d, 0x20, 0xd9, 0xc5, 0xcc, 0x4a, 0xce,
+ 0x88, 0xde, 0xf1, 0x32, 0x9a, 0xa6, 0x1d, 0xde, 0x32, 0x19, 0x74, 0xed, 0xc0, 0xf6, 0x5c, 0x32,
+ 0x2a, 0xcb, 0x0a, 0x22, 0x68, 0xe8, 0x58, 0xe5, 0x4f, 0x11, 0xcd, 0x6e, 0xeb, 0xa9, 0x0d, 0x7a,
+ 0xc8, 0x75, 0xbb, 0x81, 0x46, 0xd7, 0x50, 0xb6, 0x5b, 0x95, 0x2d, 0xf2, 0xd5, 0xf9, 0xd5, 0xde,
+ 0x75, 0xad, 0xea, 0x12, 0x23, 0xdb, 0xad, 0xe2, 0xfb, 0x68, 0x9c, 0x51, 0xb7, 0x09, 0xb2, 0x57,
+ 0xbe, 0x5a, 0xea, 0x53, 0x8a, 0x54, 0x28, 0x57, 0x42, 0x7c, 0x0b, 0x8d, 0xfa, 0x1d, 0x4e, 0xc6,
+ 0xa4, 0x9e, 0x24, 0xf5, 0x7b, 0x9d, 0x70, 0x1e, 0x43, 0x88, 0xf0, 0x3a, 0x2a, 0x58, 0xe0, 0x00,
+ 0x07, 0x53, 0x35, 0x19, 0x97, 0x45, 0x8b, 0xc9, 0xa2, 0xba, 0x54, 0x24, 0x5a, 0xe5, 0xad, 0x38,
+ 0x26, 0x1a, 0xf2, 0x63, 0x97, 0x4c, 0xa4, 0x35, 0xdc, 0x3f, 0x76, 0xa3, 0x86, 0xfc, 0xd8, 0xc5,
+ 0x0f, 0x10, 0x6a, 0x78, 0x6d, 0x9f, 0x36, 0xb8, 0xf8, 0x7e, 0x93, 0xb2, 0xe4, 0x6a, 0xb2, 0x64,
+ 0x3d, 0xca, 0x87, 0x95, 0x3d, 0x25, 0xf8, 0x21, 0xca, 0x3b, 0x40, 0x03, 0x30, 0x9b, 0x8c, 0xba,
+ 0x9c, 0x4c, 0xa5, 0x11, 0x76, 0x84, 0x60, 0x53, 0xe4, 0x23, 0x82, 0x13, 0x85, 0xc4, 0x9a, 0x15,
+ 0x81, 0x41, 0xd7, 0x3b, 0x02, 0x92, 0x4b, 0x5b, 0xb3, 0x44, 0x18, 0x52, 0x10, 0xad, 0xd9, 0x89,
+ 0x63, 0x62, 0x5b, 0xa8, 0x43, 0x59, 0x9b, 0xa0, 0xb4, 0x6d, 0xa9, 0x89, 0x54, 0xb4, 0x2d, 0x52,
+ 0x88, 0xd7, 0xd0, 0x44, 0x4b, 0x5a, 0x8e, 0x58, 0xb2, 0x64, 0x21, 0x75, 0xcf, 0x95, 0x2b, 0x0d,
+ 0x2d, 0xc5, 0x35, 0x94, 0x97, 0x8e, 0x03, 0x97, 0x1e, 0x38, 0x40, 0x7e, 0xa7, 0x7e, 0xb0, 0x5a,
+ 0x87, 0xb7, 0x36, 0xa4, 0x20, 0x5a, 0x2e, 0x8d, 0x42, 0xb8, 0x8e, 0xa4, 0x3f, 0x4d, 0xcb, 0x0e,
+ 0x24, 0xe3, 0xef, 0x64, 0xda, 0x7a, 0x05, 0xa3, 0xae, 0x14, 0xd1, 0x7a, 0x69, 0x1c, 0xc3, 0xbb,
+ 0x8a, 0x02, 0x2e, 0xb7, 0x1b, 0x94, 0x03, 0xf9, 0xa7, 0x28, 0x37, 0x93, 0x94, 0xd0, 0xf7, 0xb5,
+ 0x1e, 0x69, 0x88, 0x4b, 0xd4, 0xe3, 0x0d, 0x7d, 0x94, 0xc4, 0xd9, 0x32, 0xa9, 0x65, 0x91, 0x8f,
+ 0x53, 0xc3, 0xc6, 0x7a, 0x1c, 0x00, 0xab, 0x59, 0x56, 0x62, 0x2c, 0x1d, 0xc3, 0xbb, 0x68, 0x26,
+ 0xc6, 0x28, 0x4f, 0x92, 0x4f, 0x8a, 0xb4, 0x9c, 0x4e, 0xd2, 0x66, 0xd6, 0xb0, 0x22, 0x4d, 0x84,
+ 0x93, 0x63, 0x35, 0x81, 0x93, 0xcf, 0xe7, 0x8e, 0xb5, 0x09, 0x7c, 0x60, 0xac, 0x4d, 0xe0, 0xb8,
+ 0x89, 0xae, 0xc4, 0x98, 0x46, 0x4b, 0x9c, 0x12, 0xd3, 0xa7, 0x41, 0xf0, 0xd2, 0x63, 0x16, 0xf9,
+ 0xa2, 0x90, 0xb7, 0xd3, 0x91, 0xeb, 0x52, 0xbd, 0xa7, 0xc5, 0x21, 0xfd, 0x12, 0x4d, 0x4d, 0xe3,
+ 0x27, 0x68, 0xae, 0x67, 0x5e, 0x61, 0x6f, 0x93, 0x79, 0x0e, 0x90, 0x53, 0xd5, 0xe3, 0xfa, 0x90,
+ 0xb1, 0xe5, 0xd1, 0xf0, 0xe2, 0xad, 0xbe, 0x48, 0xfb, 0x33, 0xf8, 0x29, 0x9a, 0x8f, 0xc9, 0xea,
+ 0xa4, 0x28, 0xf4, 0x57, 0x85, 0xbe, 0x91, 0x8e, 0xd6, 0x47, 0xa6, 0x87, 0x8d, 0xe9, 0x40, 0x0a,
+ 0x6f, 0xa1, 0x62, 0x0c, 0x77, 0xec, 0x80, 0x93, 0x6f, 0x8a, 0xba, 0x94, 0x4e, 0xdd, 0xb1, 0x03,
+ 0x9e, 0xf0, 0x51, 0x18, 0x8c, 0x48, 0x62, 0x34, 0x45, 0xfa, 0x3e, 0x94, 0x24, 0x5a, 0x0f, 0x90,
+ 0xc2, 0x60, 0xb4, 0xf5, 0x92, 0x24, 0x1c, 0xf9, 0x26, 0x37, 0x6c, 0xeb, 0x45, 0x4d, 0xbf, 0x23,
+ 0x75, 0x2c, 0x72, 0xa4, 0xc4, 0x68, 0x47, 0xbe, 0xcd, 0x0d, 0x73, 0xa4, 0xa8, 0x4a, 0x71, 0x64,
+ 0x1c, 0x4e, 0x8e, 0x25, 0x1c, 0xf9, 0xee, 0xdc, 0xb1, 0xfa, 0x1d, 0xa9, 0x63, 0xf8, 0x39, 0x2a,
+ 0xf5, 0x60, 0xa4, 0x51, 0x7c, 0x60, 0x6d, 0x3b, 0x90, 0xf7, 0xd8, 0x7b, 0xc5, 0xbc, 0x33, 0x84,
+ 0x29, 0xe4, 0x7b, 0x91, 0x3a, 0xe4, 0x5f, 0xa6, 0xe9, 0x79, 0xdc, 0x46, 0x0b, 0x71, 0x2f, 0x6d,
+ 0x9d, 0x9e, 0x66, 0x1f, 0x54, 0xb3, 0xbb, 0xe9, 0xcd, 0x94, 0x4b, 0x06, 0xbb, 0x11, 0x3a, 0x44,
+ 0x50, 0xb9, 0x80, 0xa6, 0x37, 0xda, 0x3e, 0x7f, 0x65, 0x40, 0xe0, 0x7b, 0x6e, 0x00, 0x15, 0x1f,
+ 0x2d, 0x9c, 0xf3, 0x43, 0x84, 0x31, 0x1a, 0x93, 0xb7, 0x7b, 0x46, 0xde, 0xee, 0xf2, 0x59, 0xdc,
+ 0xfa, 0xd1, 0xf9, 0xd4, 0xb7, 0x7e, 0xf8, 0x8e, 0x97, 0x50, 0x21, 0xb0, 0xdb, 0xbe, 0x03, 0x26,
+ 0xf7, 0x8e, 0x40, 0x5d, 0xfa, 0x39, 0x23, 0xaf, 0x62, 0xfb, 0x22, 0xf4, 0x68, 0xee, 0xe4, 0x67,
+ 0x79, 0xe4, 0xe4, 0xac, 0x9c, 0x39, 0x3d, 0x2b, 0x67, 0x7e, 0x9c, 0x95, 0x33, 0xaf, 0x7f, 0x95,
+ 0x47, 0x0e, 0x26, 0xe4, 0x5f, 0x8e, 0xb5, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc9, 0xfc,
+ 0x0e, 0xca, 0x08, 0x00, 0x00,
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
new file mode 100644
index 0000000..40147f9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
@@ -0,0 +1,18665 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package etcdserverpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
+
+ authpb "github.com/coreos/etcd/auth/authpb"
+
+ context "golang.org/x/net/context"
+
+ grpc "google.golang.org/grpc"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AlarmType int32
+
+const (
+ AlarmType_NONE AlarmType = 0
+ AlarmType_NOSPACE AlarmType = 1
+ AlarmType_CORRUPT AlarmType = 2
+)
+
+var AlarmType_name = map[int32]string{
+ 0: "NONE",
+ 1: "NOSPACE",
+ 2: "CORRUPT",
+}
+var AlarmType_value = map[string]int32{
+ "NONE": 0,
+ "NOSPACE": 1,
+ "CORRUPT": 2,
+}
+
+func (x AlarmType) String() string {
+ return proto.EnumName(AlarmType_name, int32(x))
+}
+func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
+
+type RangeRequest_SortOrder int32
+
+const (
+ RangeRequest_NONE RangeRequest_SortOrder = 0
+ RangeRequest_ASCEND RangeRequest_SortOrder = 1
+ RangeRequest_DESCEND RangeRequest_SortOrder = 2
+)
+
+var RangeRequest_SortOrder_name = map[int32]string{
+ 0: "NONE",
+ 1: "ASCEND",
+ 2: "DESCEND",
+}
+var RangeRequest_SortOrder_value = map[string]int32{
+ "NONE": 0,
+ "ASCEND": 1,
+ "DESCEND": 2,
+}
+
+func (x RangeRequest_SortOrder) String() string {
+ return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
+}
+func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} }
+
+type RangeRequest_SortTarget int32
+
+const (
+ RangeRequest_KEY RangeRequest_SortTarget = 0
+ RangeRequest_VERSION RangeRequest_SortTarget = 1
+ RangeRequest_CREATE RangeRequest_SortTarget = 2
+ RangeRequest_MOD RangeRequest_SortTarget = 3
+ RangeRequest_VALUE RangeRequest_SortTarget = 4
+)
+
+var RangeRequest_SortTarget_name = map[int32]string{
+ 0: "KEY",
+ 1: "VERSION",
+ 2: "CREATE",
+ 3: "MOD",
+ 4: "VALUE",
+}
+var RangeRequest_SortTarget_value = map[string]int32{
+ "KEY": 0,
+ "VERSION": 1,
+ "CREATE": 2,
+ "MOD": 3,
+ "VALUE": 4,
+}
+
+func (x RangeRequest_SortTarget) String() string {
+ return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
+}
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} }
+
+type Compare_CompareResult int32
+
+const (
+ Compare_EQUAL Compare_CompareResult = 0
+ Compare_GREATER Compare_CompareResult = 1
+ Compare_LESS Compare_CompareResult = 2
+ Compare_NOT_EQUAL Compare_CompareResult = 3
+)
+
+var Compare_CompareResult_name = map[int32]string{
+ 0: "EQUAL",
+ 1: "GREATER",
+ 2: "LESS",
+ 3: "NOT_EQUAL",
+}
+var Compare_CompareResult_value = map[string]int32{
+ "EQUAL": 0,
+ "GREATER": 1,
+ "LESS": 2,
+ "NOT_EQUAL": 3,
+}
+
+func (x Compare_CompareResult) String() string {
+ return proto.EnumName(Compare_CompareResult_name, int32(x))
+}
+func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} }
+
+type Compare_CompareTarget int32
+
+const (
+ Compare_VERSION Compare_CompareTarget = 0
+ Compare_CREATE Compare_CompareTarget = 1
+ Compare_MOD Compare_CompareTarget = 2
+ Compare_VALUE Compare_CompareTarget = 3
+ Compare_LEASE Compare_CompareTarget = 4
+)
+
+var Compare_CompareTarget_name = map[int32]string{
+ 0: "VERSION",
+ 1: "CREATE",
+ 2: "MOD",
+ 3: "VALUE",
+ 4: "LEASE",
+}
+var Compare_CompareTarget_value = map[string]int32{
+ "VERSION": 0,
+ "CREATE": 1,
+ "MOD": 2,
+ "VALUE": 3,
+ "LEASE": 4,
+}
+
+func (x Compare_CompareTarget) String() string {
+ return proto.EnumName(Compare_CompareTarget_name, int32(x))
+}
+func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} }
+
+type WatchCreateRequest_FilterType int32
+
+const (
+ // filter out put event.
+ WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+ // filter out delete event.
+ WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+ 0: "NOPUT",
+ 1: "NODELETE",
+}
+var WatchCreateRequest_FilterType_value = map[string]int32{
+ "NOPUT": 0,
+ "NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+ return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+ AlarmRequest_GET AlarmRequest_AlarmAction = 0
+ AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1
+ AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+ 0: "GET",
+ 1: "ACTIVATE",
+ 2: "DEACTIVATE",
+}
+var AlarmRequest_AlarmAction_value = map[string]int32{
+ "GET": 0,
+ "ACTIVATE": 1,
+ "DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+ return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{48, 0}
+}
+
+type ResponseHeader struct {
+ // cluster_id is the ID of the cluster which sent the response.
+ ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ // member_id is the ID of the member which sent the response.
+ MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+ // revision is the key-value store revision when the request was applied.
+ Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+ // raft_term is the raft term when the request was applied.
+ RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
+}
+
+func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
+func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
+func (*ResponseHeader) ProtoMessage() {}
+func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
+
+func (m *ResponseHeader) GetClusterId() uint64 {
+ if m != nil {
+ return m.ClusterId
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetMemberId() uint64 {
+ if m != nil {
+ return m.MemberId
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetRaftTerm() uint64 {
+ if m != nil {
+ return m.RaftTerm
+ }
+ return 0
+}
+
+type RangeRequest struct {
+ // key is the first key for the range. If range_end is not given, the request only looks up key.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the upper bound on the requested range [key, range_end).
+ // If range_end is '\0', the range is all keys >= key.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ // then the range request gets all keys prefixed with key.
+ // If both key and range_end are '\0', then the range request returns all keys.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+ // it is treated as no limit.
+ Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ // revision is the point-in-time of the key-value store to use for the range.
+ // If revision is less or equal to zero, the range is over the newest key-value store.
+ // If the revision has been compacted, ErrCompacted is returned as a response.
+ Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+ // sort_order is the order for returned sorted results.
+ SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+ // sort_target is the key-value field to use for sorting.
+ SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+ // serializable sets the range request to use serializable member-local reads.
+ // Range requests are linearizable by default; linearizable requests have higher
+ // latency and lower throughput than serializable requests but reflect the current
+ // consensus of the cluster. For better performance, in exchange for possible stale reads,
+ // a serializable range request is served locally without needing to reach consensus
+ // with other nodes in the cluster.
+ Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+ // keys_only when set returns only the keys and not the values.
+ KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+ // count_only when set returns only the count of the keys in the range.
+ CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+ // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+ // lesser mod revisions will be filtered away.
+ MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+ // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+ // greater mod revisions will be filtered away.
+ MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+ // min_create_revision is the lower bound for returned key create revisions; all keys with
+ // lesser create revisions will be filtered away.
+ MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+ // max_create_revision is the upper bound for returned key create revisions; all keys with
+ // greater create revisions will be filtered away.
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
+}
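
The range_end comments above give the recipe for a prefix scan: send the prefix as key and the prefix with its last byte incremented as range_end. A minimal standalone sketch of building such a request (editor's illustration, not part of the generated file; prefixRangeEnd is a hypothetical helper and the key values are examples):

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// prefixRangeEnd returns the smallest key that is greater than every key with
// the given prefix, e.g. "aa" -> "ab" and "a\xff" -> "b", as described above.
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte{}, prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// All bytes are 0xff: fall back to '\0', i.e. scan to the end of the keyspace.
	return []byte{0}
}

func main() {
	req := &pb.RangeRequest{
		Key:      []byte("foo"),
		RangeEnd: prefixRangeEnd([]byte("foo")), // all keys prefixed with "foo"
		Limit:    10,
	}
	fmt.Println(req.String())
}
```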
+
+func (m *RangeRequest) Reset() { *m = RangeRequest{} }
+func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
+func (*RangeRequest) ProtoMessage() {}
+func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
+
+func (m *RangeRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *RangeRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *RangeRequest) GetLimit() int64 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
+ if m != nil {
+ return m.SortOrder
+ }
+ return RangeRequest_NONE
+}
+
+func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
+ if m != nil {
+ return m.SortTarget
+ }
+ return RangeRequest_KEY
+}
+
+func (m *RangeRequest) GetSerializable() bool {
+ if m != nil {
+ return m.Serializable
+ }
+ return false
+}
+
+func (m *RangeRequest) GetKeysOnly() bool {
+ if m != nil {
+ return m.KeysOnly
+ }
+ return false
+}
+
+func (m *RangeRequest) GetCountOnly() bool {
+ if m != nil {
+ return m.CountOnly
+ }
+ return false
+}
+
+func (m *RangeRequest) GetMinModRevision() int64 {
+ if m != nil {
+ return m.MinModRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMaxModRevision() int64 {
+ if m != nil {
+ return m.MaxModRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMinCreateRevision() int64 {
+ if m != nil {
+ return m.MinCreateRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMaxCreateRevision() int64 {
+ if m != nil {
+ return m.MaxCreateRevision
+ }
+ return 0
+}
+
+type RangeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // kvs is the list of key-value pairs matched by the range request.
+ // kvs is empty when count is requested.
+ Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"`
+ // more indicates if there are more keys to return in the requested range.
+ More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
+ // count is set to the number of keys within the range when requested.
+ Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (m *RangeResponse) Reset() { *m = RangeResponse{} }
+func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
+func (*RangeResponse) ProtoMessage() {}
+func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
+
+func (m *RangeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
+ if m != nil {
+ return m.Kvs
+ }
+ return nil
+}
+
+func (m *RangeResponse) GetMore() bool {
+ if m != nil {
+ return m.More
+ }
+ return false
+}
+
+func (m *RangeResponse) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+type PutRequest struct {
+ // key is the key, in bytes, to put into the key-value store.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // value is the value, in bytes, to associate with the key in the key-value store.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // lease is the lease ID to associate with the key in the key-value store. A lease
+ // value of 0 indicates no lease.
+ Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
+ // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+ // The previous key-value pair will be returned in the put response.
+ PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ // If ignore_value is set, etcd updates the key using its current value.
+ // Returns an error if the key does not exist.
+ IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
+ // If ignore_lease is set, etcd updates the key using its current lease.
+ // Returns an error if the key does not exist.
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} }
+
+func (m *PutRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *PutRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+func (m *PutRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+func (m *PutRequest) GetIgnoreValue() bool {
+ if m != nil {
+ return m.IgnoreValue
+ }
+ return false
+}
+
+func (m *PutRequest) GetIgnoreLease() bool {
+ if m != nil {
+ return m.IgnoreLease
+ }
+ return false
+}
+
+type PutResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // if prev_kv is set in the request, the previous key-value pair will be returned.
+ PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} }
+
+func (m *PutResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
+ if m != nil {
+ return m.PrevKv
+ }
+ return nil
+}
+
+type DeleteRangeRequest struct {
+ // key is the first key to delete in the range.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the key following the last key to delete for the range [key, range_end).
+ // If range_end is not given, the range is defined to contain only the key argument.
+ // If range_end is the given key plus one (i.e., the key with its last byte incremented),
+ // then the range is all the keys with the given key as a prefix.
+ // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
+ // The previous key-value pairs will be returned in the delete response.
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+}
+
+func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} }
+func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeRequest) ProtoMessage() {}
+func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} }
+
+func (m *DeleteRangeRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRangeRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *DeleteRangeRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+type DeleteRangeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // deleted is the number of keys deleted by the delete range request.
+ Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
+ // if prev_kv is set in the request, the previous key-value pairs will be returned.
+ PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"`
+}
+
+func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} }
+func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeResponse) ProtoMessage() {}
+func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} }
+
+func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRangeResponse) GetDeleted() int64 {
+ if m != nil {
+ return m.Deleted
+ }
+ return 0
+}
+
+func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
+ if m != nil {
+ return m.PrevKvs
+ }
+ return nil
+}
+
+type RequestOp struct {
+ // request is a union of request types accepted by a transaction.
+ //
+ // Types that are valid to be assigned to Request:
+ // *RequestOp_RequestRange
+ // *RequestOp_RequestPut
+ // *RequestOp_RequestDeleteRange
+ // *RequestOp_RequestTxn
+ Request isRequestOp_Request `protobuf_oneof:"request"`
+}
+
+func (m *RequestOp) Reset() { *m = RequestOp{} }
+func (m *RequestOp) String() string { return proto.CompactTextString(m) }
+func (*RequestOp) ProtoMessage() {}
+func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} }
+
+type isRequestOp_Request interface {
+ isRequestOp_Request()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type RequestOp_RequestRange struct {
+ RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"`
+}
+type RequestOp_RequestPut struct {
+ RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"`
+}
+type RequestOp_RequestDeleteRange struct {
+ RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"`
+}
+type RequestOp_RequestTxn struct {
+ RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"`
+}
+
+func (*RequestOp_RequestRange) isRequestOp_Request() {}
+func (*RequestOp_RequestPut) isRequestOp_Request() {}
+func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
+func (*RequestOp_RequestTxn) isRequestOp_Request() {}
+
+func (m *RequestOp) GetRequest() isRequestOp_Request {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestRange() *RangeRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
+ return x.RequestRange
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestPut() *PutRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
+ return x.RequestPut
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
+ return x.RequestDeleteRange
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestTxn() *TxnRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
+ return x.RequestTxn
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
+ (*RequestOp_RequestRange)(nil),
+ (*RequestOp_RequestPut)(nil),
+ (*RequestOp_RequestDeleteRange)(nil),
+ (*RequestOp_RequestTxn)(nil),
+ }
+}
+
+func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*RequestOp)
+ // request
+ switch x := m.Request.(type) {
+ case *RequestOp_RequestRange:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestRange); err != nil {
+ return err
+ }
+ case *RequestOp_RequestPut:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestPut); err != nil {
+ return err
+ }
+ case *RequestOp_RequestDeleteRange:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
+ return err
+ }
+ case *RequestOp_RequestTxn:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestTxn); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*RequestOp)
+ switch tag {
+ case 1: // request.request_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RangeRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestRange{msg}
+ return true, err
+ case 2: // request.request_put
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(PutRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestPut{msg}
+ return true, err
+ case 3: // request.request_delete_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DeleteRangeRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestDeleteRange{msg}
+ return true, err
+ case 4: // request.request_txn
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TxnRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestTxn{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _RequestOp_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*RequestOp)
+ // request
+ switch x := m.Request.(type) {
+ case *RequestOp_RequestRange:
+ s := proto.Size(x.RequestRange)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *RequestOp_RequestPut:
+ s := proto.Size(x.RequestPut)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *RequestOp_RequestDeleteRange:
+ s := proto.Size(x.RequestDeleteRange)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *RequestOp_RequestTxn:
+ s := proto.Size(x.RequestTxn)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type ResponseOp struct {
+ // response is a union of response types returned by a transaction.
+ //
+ // Types that are valid to be assigned to Response:
+ // *ResponseOp_ResponseRange
+ // *ResponseOp_ResponsePut
+ // *ResponseOp_ResponseDeleteRange
+ // *ResponseOp_ResponseTxn
+ Response isResponseOp_Response `protobuf_oneof:"response"`
+}
+
+func (m *ResponseOp) Reset() { *m = ResponseOp{} }
+func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
+func (*ResponseOp) ProtoMessage() {}
+func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} }
+
+type isResponseOp_Response interface {
+ isResponseOp_Response()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type ResponseOp_ResponseRange struct {
+ ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"`
+}
+type ResponseOp_ResponsePut struct {
+ ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"`
+}
+type ResponseOp_ResponseDeleteRange struct {
+ ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"`
+}
+type ResponseOp_ResponseTxn struct {
+ ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"`
+}
+
+func (*ResponseOp_ResponseRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponsePut) isResponseOp_Response() {}
+func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponseTxn) isResponseOp_Response() {}
+
+func (m *ResponseOp) GetResponse() isResponseOp_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseRange() *RangeResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
+ return x.ResponseRange
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponsePut() *PutResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
+ return x.ResponsePut
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
+ return x.ResponseDeleteRange
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseTxn() *TxnResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
+ return x.ResponseTxn
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
+ (*ResponseOp_ResponseRange)(nil),
+ (*ResponseOp_ResponsePut)(nil),
+ (*ResponseOp_ResponseDeleteRange)(nil),
+ (*ResponseOp_ResponseTxn)(nil),
+ }
+}
+
+func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ResponseOp)
+ // response
+ switch x := m.Response.(type) {
+ case *ResponseOp_ResponseRange:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponseRange); err != nil {
+ return err
+ }
+ case *ResponseOp_ResponsePut:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponsePut); err != nil {
+ return err
+ }
+ case *ResponseOp_ResponseDeleteRange:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
+ return err
+ }
+ case *ResponseOp_ResponseTxn:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponseTxn); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ResponseOp)
+ switch tag {
+ case 1: // response.response_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RangeResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponseRange{msg}
+ return true, err
+ case 2: // response.response_put
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(PutResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponsePut{msg}
+ return true, err
+ case 3: // response.response_delete_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DeleteRangeResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponseDeleteRange{msg}
+ return true, err
+ case 4: // response.response_txn
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TxnResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponseTxn{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ResponseOp)
+ // response
+ switch x := m.Response.(type) {
+ case *ResponseOp_ResponseRange:
+ s := proto.Size(x.ResponseRange)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ResponseOp_ResponsePut:
+ s := proto.Size(x.ResponsePut)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ResponseOp_ResponseDeleteRange:
+ s := proto.Size(x.ResponseDeleteRange)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ResponseOp_ResponseTxn:
+ s := proto.Size(x.ResponseTxn)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type Compare struct {
+ // result is logical comparison operation for this comparison.
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
+ // target is the key-value field to inspect for the comparison.
+ Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
+ // key is the subject key for the comparison operation.
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Types that are valid to be assigned to TargetUnion:
+ // *Compare_Version
+ // *Compare_CreateRevision
+ // *Compare_ModRevision
+ // *Compare_Value
+ // *Compare_Lease
+ TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
+ // range_end compares the given target to all keys in the range [key, range_end).
+ // See RangeRequest for more details on key ranges.
+ RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *Compare) Reset() { *m = Compare{} }
+func (m *Compare) String() string { return proto.CompactTextString(m) }
+func (*Compare) ProtoMessage() {}
+func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} }
+
+type isCompare_TargetUnion interface {
+ isCompare_TargetUnion()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Compare_Version struct {
+ Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
+}
+type Compare_CreateRevision struct {
+ CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
+}
+type Compare_ModRevision struct {
+ ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
+}
+type Compare_Value struct {
+ Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
+}
+type Compare_Lease struct {
+ Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
+}
+
+func (*Compare_Version) isCompare_TargetUnion() {}
+func (*Compare_CreateRevision) isCompare_TargetUnion() {}
+func (*Compare_ModRevision) isCompare_TargetUnion() {}
+func (*Compare_Value) isCompare_TargetUnion() {}
+func (*Compare_Lease) isCompare_TargetUnion() {}
+
+func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
+ if m != nil {
+ return m.TargetUnion
+ }
+ return nil
+}
+
+func (m *Compare) GetResult() Compare_CompareResult {
+ if m != nil {
+ return m.Result
+ }
+ return Compare_EQUAL
+}
+
+func (m *Compare) GetTarget() Compare_CompareTarget {
+ if m != nil {
+ return m.Target
+ }
+ return Compare_VERSION
+}
+
+func (m *Compare) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Compare) GetVersion() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
+ return x.Version
+ }
+ return 0
+}
+
+func (m *Compare) GetCreateRevision() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
+ return x.CreateRevision
+ }
+ return 0
+}
+
+func (m *Compare) GetModRevision() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
+ return x.ModRevision
+ }
+ return 0
+}
+
+func (m *Compare) GetValue() []byte {
+ if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (m *Compare) GetLease() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
+ return x.Lease
+ }
+ return 0
+}
+
+func (m *Compare) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
+ (*Compare_Version)(nil),
+ (*Compare_CreateRevision)(nil),
+ (*Compare_ModRevision)(nil),
+ (*Compare_Value)(nil),
+ (*Compare_Lease)(nil),
+ }
+}
+
+func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Compare)
+ // target_union
+ switch x := m.TargetUnion.(type) {
+ case *Compare_Version:
+ _ = b.EncodeVarint(4<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.Version))
+ case *Compare_CreateRevision:
+ _ = b.EncodeVarint(5<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.CreateRevision))
+ case *Compare_ModRevision:
+ _ = b.EncodeVarint(6<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.ModRevision))
+ case *Compare_Value:
+ _ = b.EncodeVarint(7<<3 | proto.WireBytes)
+ _ = b.EncodeRawBytes(x.Value)
+ case *Compare_Lease:
+ _ = b.EncodeVarint(8<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.Lease))
+ case nil:
+ default:
+ return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Compare)
+ switch tag {
+ case 4: // target_union.version
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_Version{int64(x)}
+ return true, err
+ case 5: // target_union.create_revision
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_CreateRevision{int64(x)}
+ return true, err
+ case 6: // target_union.mod_revision
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_ModRevision{int64(x)}
+ return true, err
+ case 7: // target_union.value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.TargetUnion = &Compare_Value{x}
+ return true, err
+ case 8: // target_union.lease
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_Lease{int64(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Compare_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Compare)
+ // target_union
+ switch x := m.TargetUnion.(type) {
+ case *Compare_Version:
+ n += proto.SizeVarint(4<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Version))
+ case *Compare_CreateRevision:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.CreateRevision))
+ case *Compare_ModRevision:
+ n += proto.SizeVarint(6<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.ModRevision))
+ case *Compare_Value:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Value)))
+ n += len(x.Value)
+ case *Compare_Lease:
+ n += proto.SizeVarint(8<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Lease))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+type TxnRequest struct {
+ // compare is a list of predicates representing a conjunction of terms.
+ // If the comparisons succeed, then the success requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ // If the comparisons fail, then the failure requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"`
+ // success is a list of requests which will be applied when compare evaluates to true.
+ Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"`
+ // failure is a list of requests which will be applied when compare evaluates to false.
+ Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"`
+}
+
+func (m *TxnRequest) Reset() { *m = TxnRequest{} }
+func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage() {}
+func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} }
+
+func (m *TxnRequest) GetCompare() []*Compare {
+ if m != nil {
+ return m.Compare
+ }
+ return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+ if m != nil {
+ return m.Success
+ }
+ return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+ if m != nil {
+ return m.Failure
+ }
+ return nil
+}
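+
+// Illustrative sketch (not part of the generated code): the MultiOp comment on
+// TxnRequest above maps Compare onto the guard and Success/Failure onto t_op
+// and f_op. The key name and revision below are assumptions chosen only for
+// this example; on the response side, TxnResponse.Succeeded reports which of
+// the two lists was applied, and Responses mirrors that list in order.
+func exampleTxnRequest() *TxnRequest {
+	return &TxnRequest{
+		// Guard: key "foo" must still be at mod revision 7.
+		Compare: []*Compare{{
+			Result:      Compare_EQUAL,
+			Target:      Compare_MOD,
+			Key:         []byte("foo"),
+			TargetUnion: &Compare_ModRevision{ModRevision: 7},
+		}},
+		// t_op: applied only if every comparison holds.
+		Success: []*RequestOp{{
+			Request: &RequestOp_RequestPut{RequestPut: &PutRequest{Key: []byte("foo"), Value: []byte("bar")}},
+		}},
+		// f_op: applied if any comparison fails; here, read the current value back.
+		Failure: []*RequestOp{{
+			Request: &RequestOp_RequestRange{RequestRange: &RangeRequest{Key: []byte("foo")}},
+		}},
+	}
+}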
+
+type TxnResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// succeeded is set to true if the compare evaluated to true, and to false otherwise.
+ Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+ // responses is a list of responses corresponding to the results from applying
+ // success if succeeded is true or failure if succeeded is false.
+ Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"`
+}
+
+func (m *TxnResponse) Reset() { *m = TxnResponse{} }
+func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage() {}
+func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} }
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+ if m != nil {
+ return m.Succeeded
+ }
+ return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+ if m != nil {
+ return m.Responses
+ }
+ return nil
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+ // revision is the key-value store revision for the compaction operation.
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+ // physical is set so the RPC will wait until the compaction is physically
+ // applied to the local database such that compacted entries are totally
+ // removed from the backend database.
+ Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
+}
+
+func (m *CompactionRequest) Reset() { *m = CompactionRequest{} }
+func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
+func (*CompactionRequest) ProtoMessage() {}
+func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} }
+
+func (m *CompactionRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *CompactionRequest) GetPhysical() bool {
+ if m != nil {
+ return m.Physical
+ }
+ return false
+}
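+
+// Illustrative sketch (not generated code): per the comment on
+// CompactionRequest, a compaction discards superseded revisions below the
+// given revision, and setting Physical makes the RPC block until the space is
+// actually reclaimed. The revision value is an assumption for the example.
+func exampleCompactionRequest() *CompactionRequest {
+	return &CompactionRequest{
+		Revision: 1000, // compact everything below revision 1000
+		Physical: true, // wait until compacted entries are removed from the backend
+	}
+}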
+
+type CompactionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *CompactionResponse) Reset() { *m = CompactionResponse{} }
+func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
+func (*CompactionResponse) ProtoMessage() {}
+func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} }
+
+func (m *CompactionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type HashRequest struct {
+}
+
+func (m *HashRequest) Reset() { *m = HashRequest{} }
+func (m *HashRequest) String() string { return proto.CompactTextString(m) }
+func (*HashRequest) ProtoMessage() {}
+func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} }
+
+type HashKVRequest struct {
+ // revision is the key-value store revision for the hash operation.
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+}
+
+func (m *HashKVRequest) Reset() { *m = HashKVRequest{} }
+func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
+func (*HashKVRequest) ProtoMessage() {}
+func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} }
+
+func (m *HashKVRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+type HashKVResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+ // compact_revision is the compacted revision of key-value store when hash begins.
+ CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+}
+
+func (m *HashKVResponse) Reset() { *m = HashKVResponse{} }
+func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
+func (*HashKVResponse) ProtoMessage() {}
+func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} }
+
+func (m *HashKVResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *HashKVResponse) GetHash() uint32 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+func (m *HashKVResponse) GetCompactRevision() int64 {
+ if m != nil {
+ return m.CompactRevision
+ }
+ return 0
+}
+
+type HashResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // hash is the hash value computed from the responding member's KV's backend.
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (m *HashResponse) Reset() { *m = HashResponse{} }
+func (m *HashResponse) String() string { return proto.CompactTextString(m) }
+func (*HashResponse) ProtoMessage() {}
+func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} }
+
+func (m *HashResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *HashResponse) GetHash() uint32 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+type SnapshotRequest struct {
+}
+
+func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} }
+func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*SnapshotRequest) ProtoMessage() {}
+func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} }
+
+type SnapshotResponse struct {
+ // header has the current key-value store information. The first header in the snapshot
+ // stream indicates the point in time of the snapshot.
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // remaining_bytes is the number of blob bytes to be sent after this message
+ RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
+ // blob contains the next chunk of the snapshot in the snapshot stream.
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+}
+
+func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} }
+func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*SnapshotResponse) ProtoMessage() {}
+func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} }
+
+func (m *SnapshotResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *SnapshotResponse) GetRemainingBytes() uint64 {
+ if m != nil {
+ return m.RemainingBytes
+ }
+ return 0
+}
+
+func (m *SnapshotResponse) GetBlob() []byte {
+ if m != nil {
+ return m.Blob
+ }
+ return nil
+}
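+
+// Illustrative sketch (not generated code): the comments on SnapshotResponse
+// describe a chunked stream in which each message carries the next Blob and
+// the number of bytes still to come. This hypothetical helper reassembles such
+// a sequence into one byte slice; it assumes the caller already received the
+// messages in stream order.
+func assembleSnapshot(chunks []*SnapshotResponse) []byte {
+	var snapshot []byte
+	for _, c := range chunks {
+		snapshot = append(snapshot, c.GetBlob()...)
+		if c.GetRemainingBytes() == 0 {
+			break // final message of the snapshot stream
+		}
+	}
+	return snapshot
+}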
+
+type WatchRequest struct {
+ // request_union is a request to either create a new watcher or cancel an existing watcher.
+ //
+ // Types that are valid to be assigned to RequestUnion:
+ // *WatchRequest_CreateRequest
+ // *WatchRequest_CancelRequest
+ RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
+}
+
+func (m *WatchRequest) Reset() { *m = WatchRequest{} }
+func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchRequest) ProtoMessage() {}
+func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} }
+
+type isWatchRequest_RequestUnion interface {
+ isWatchRequest_RequestUnion()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type WatchRequest_CreateRequest struct {
+ CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"`
+}
+type WatchRequest_CancelRequest struct {
+ CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"`
+}
+
+func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {}
+func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {}
+
+func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
+ if m != nil {
+ return m.RequestUnion
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
+ return x.CreateRequest
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
+ return x.CancelRequest
+ }
+ return nil
+}
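+
+// Illustrative sketch (not generated code): request_union means a single
+// WatchRequest carries either a create or a cancel message. The watcher id
+// below is an assumption; a real id comes from a prior WatchResponse with
+// Created set.
+func exampleWatchRequests() (*WatchRequest, *WatchRequest) {
+	create := &WatchRequest{RequestUnion: &WatchRequest_CreateRequest{
+		CreateRequest: &WatchCreateRequest{Key: []byte("foo")},
+	}}
+	cancel := &WatchRequest{RequestUnion: &WatchRequest_CancelRequest{
+		CancelRequest: &WatchCancelRequest{WatchId: 42},
+	}}
+	return create, cancel
+}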
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
+ (*WatchRequest_CreateRequest)(nil),
+ (*WatchRequest_CancelRequest)(nil),
+ }
+}
+
+func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*WatchRequest)
+ // request_union
+ switch x := m.RequestUnion.(type) {
+ case *WatchRequest_CreateRequest:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.CreateRequest); err != nil {
+ return err
+ }
+ case *WatchRequest_CancelRequest:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.CancelRequest); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*WatchRequest)
+ switch tag {
+ case 1: // request_union.create_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(WatchCreateRequest)
+ err := b.DecodeMessage(msg)
+ m.RequestUnion = &WatchRequest_CreateRequest{msg}
+ return true, err
+ case 2: // request_union.cancel_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(WatchCancelRequest)
+ err := b.DecodeMessage(msg)
+ m.RequestUnion = &WatchRequest_CancelRequest{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*WatchRequest)
+ // request_union
+ switch x := m.RequestUnion.(type) {
+ case *WatchRequest_CreateRequest:
+ s := proto.Size(x.CreateRequest)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *WatchRequest_CancelRequest:
+ s := proto.Size(x.CancelRequest)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type WatchCreateRequest struct {
+ // key is the key to register for watching.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+ // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+ // or equal to the key argument are watched.
+	// If the range_end is one larger than the given key (for example, the last byte of the key incremented by one),
+	// then all keys with the prefix (the given key) will be watched.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// start_revision is an optional revision to watch from (inclusive). If start_revision is not given, "now" is used.
+ StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+	// filters filter the events on the server side before they are sent back to the watcher.
+ Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+ // If prev_kv is set, created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+}
+
+func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage() {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} }
+
+func (m *WatchCreateRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+ if m != nil {
+ return m.StartRevision
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+ if m != nil {
+ return m.ProgressNotify
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+ if m != nil {
+ return m.Filters
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
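+
+// Illustrative sketch (not generated code): the range_end comment above says
+// that an end key "one larger" than the watched key turns the watch into a
+// prefix watch. This hypothetical helper computes such an end key by
+// incrementing the last byte that can be incremented; an all-0xff prefix falls
+// back to '\x00', meaning "from key to the end of the keyspace".
+func prefixRangeEnd(prefix []byte) []byte {
+	end := make([]byte, len(prefix))
+	copy(end, prefix)
+	for i := len(end) - 1; i >= 0; i-- {
+		if end[i] < 0xff {
+			end[i]++
+			return end[:i+1]
+		}
+	}
+	return []byte{0}
+}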
+
+type WatchCancelRequest struct {
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+}
+
+func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage() {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} }
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+type WatchResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // watch_id is the ID of the watcher that corresponds to the response.
+ WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will attach with the same watch_id.
+ Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+ // canceled is set to true if the response is for a cancel watch request.
+ // No further events will be sent to the canceled watcher.
+ Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+	// This happens when creating a watcher at a compacted revision or when the watcher cannot
+	// catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ // cancel_reason indicates the reason for canceling the watcher.
+ CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+ Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"`
+}
+
+func (m *WatchResponse) Reset() { *m = WatchResponse{} }
+func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
+func (*WatchResponse) ProtoMessage() {}
+func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} }
+
+func (m *WatchResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *WatchResponse) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+func (m *WatchResponse) GetCreated() bool {
+ if m != nil {
+ return m.Created
+ }
+ return false
+}
+
+func (m *WatchResponse) GetCanceled() bool {
+ if m != nil {
+ return m.Canceled
+ }
+ return false
+}
+
+func (m *WatchResponse) GetCompactRevision() int64 {
+ if m != nil {
+ return m.CompactRevision
+ }
+ return 0
+}
+
+func (m *WatchResponse) GetCancelReason() string {
+ if m != nil {
+ return m.CancelReason
+ }
+ return ""
+}
+
+func (m *WatchResponse) GetEvents() []*mvccpb.Event {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
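+
+// Illustrative sketch (not generated code) of how a client might interpret
+// the flags documented on WatchResponse; the branch bodies are placeholders.
+func classifyWatchResponse(resp *WatchResponse) string {
+	switch {
+	case resp.GetCreated():
+		// Record resp.WatchId: all events for this watcher carry the same id.
+		return "created"
+	case resp.GetCanceled():
+		// No further events will arrive; CancelReason explains why.
+		return "canceled"
+	case resp.GetCompactRevision() != 0:
+		// The requested start_revision was compacted; do not recreate the
+		// watcher with the same start_revision.
+		return "compacted"
+	default:
+		// Ordinary notification: apply resp.Events in order.
+		return "events"
+	}
+}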
+
+type LeaseGrantRequest struct {
+	// TTL is the advisory time-to-live in seconds. An expired lease will return a TTL of -1.
+ TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} }
+func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantRequest) ProtoMessage() {}
+func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} }
+
+func (m *LeaseGrantRequest) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseGrantRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseGrantResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // ID is the lease ID for the granted lease.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the server chosen lease time-to-live in seconds.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} }
+func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantResponse) ProtoMessage() {}
+func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} }
+
+func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseGrantResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseGrantResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseGrantResponse) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
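+
+// Illustrative sketch (not generated code): granting a lease with a
+// server-chosen ID, as described by the comments above. The TTL is an
+// assumption; the server reports the TTL it actually chose in the response.
+func exampleLeaseGrantRequest() *LeaseGrantRequest {
+	return &LeaseGrantRequest{
+		TTL: 60, // advisory time-to-live in seconds
+		ID:  0,  // 0 lets the lessor pick the lease ID
+	}
+}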
+
+type LeaseRevokeRequest struct {
+ // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} }
+func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeRequest) ProtoMessage() {}
+func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} }
+
+func (m *LeaseRevokeRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseRevokeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} }
+func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeResponse) ProtoMessage() {}
+func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} }
+
+func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type LeaseKeepAliveRequest struct {
+ // ID is the lease ID for the lease to keep alive.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} }
+func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveRequest) ProtoMessage() {}
+func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} }
+
+func (m *LeaseKeepAliveRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseKeepAliveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // ID is the lease ID from the keep alive request.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the new time-to-live for the lease.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+}
+
+func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} }
+func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveResponse) ProtoMessage() {}
+func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} }
+
+func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseKeepAliveResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseKeepAliveResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+type LeaseTimeToLiveRequest struct {
+ // ID is the lease ID for the lease.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // keys is true to query all the keys attached to this lease.
+ Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
+}
+
+func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} }
+func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveRequest) ProtoMessage() {}
+func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} }
+
+func (m *LeaseTimeToLiveRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveRequest) GetKeys() bool {
+ if m != nil {
+ return m.Keys
+ }
+ return false
+}
+
+type LeaseTimeToLiveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// ID is the lease ID from the time-to-live request.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
+ // Keys is the list of keys attached to this lease.
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"`
+}
+
+func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} }
+func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveResponse) ProtoMessage() {}
+func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} }
+
+func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseTimeToLiveResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
+ if m != nil {
+ return m.GrantedTTL
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
+ if m != nil {
+ return m.Keys
+ }
+ return nil
+}
+
+type LeaseLeasesRequest struct {
+}
+
+func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} }
+func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesRequest) ProtoMessage() {}
+func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} }
+
+type LeaseStatus struct {
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseStatus) Reset() { *m = LeaseStatus{} }
+func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
+func (*LeaseStatus) ProtoMessage() {}
+func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} }
+
+func (m *LeaseStatus) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseLeasesResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"`
+}
+
+func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} }
+func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesResponse) ProtoMessage() {}
+func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} }
+
+func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
+ if m != nil {
+ return m.Leases
+ }
+ return nil
+}
+
+type Member struct {
+ // ID is the member ID for this member.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // peerURLs is the list of URLs the member exposes to the cluster for communication.
+ PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"`
+ // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+ ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"`
+}
+
+func (m *Member) Reset() { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage() {}
+func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} }
+
+func (m *Member) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *Member) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Member) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+func (m *Member) GetClientURLs() []string {
+ if m != nil {
+ return m.ClientURLs
+ }
+ return nil
+}
+
+type MemberAddRequest struct {
+ // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+ PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"`
+}
+
+func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} }
+func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberAddRequest) ProtoMessage() {}
+func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} }
+
+func (m *MemberAddRequest) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+type MemberAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // member is the member information for the added member.
+ Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"`
+ // members is a list of all members after adding the new member.
+ Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} }
+func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberAddResponse) ProtoMessage() {}
+func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} }
+
+func (m *MemberAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberAddResponse) GetMember() *Member {
+ if m != nil {
+ return m.Member
+ }
+ return nil
+}
+
+func (m *MemberAddResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberRemoveRequest struct {
+ // ID is the member ID of the member to remove.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} }
+func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveRequest) ProtoMessage() {}
+func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} }
+
+func (m *MemberRemoveRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type MemberRemoveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // members is a list of all members after removing the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} }
+func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveResponse) ProtoMessage() {}
+func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} }
+
+func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberRemoveResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberUpdateRequest struct {
+ // ID is the member ID of the member to update.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+ PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"`
+}
+
+func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} }
+func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateRequest) ProtoMessage() {}
+func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} }
+
+func (m *MemberUpdateRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *MemberUpdateRequest) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+type MemberUpdateResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // members is a list of all members after updating the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} }
+func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateResponse) ProtoMessage() {}
+func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} }
+
+func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberUpdateResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberListRequest struct {
+}
+
+func (m *MemberListRequest) Reset() { *m = MemberListRequest{} }
+func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberListRequest) ProtoMessage() {}
+func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} }
+
+type MemberListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // members is a list of all members associated with the cluster.
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberListResponse) Reset() { *m = MemberListResponse{} }
+func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberListResponse) ProtoMessage() {}
+func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} }
+
+func (m *MemberListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberListResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type DefragmentRequest struct {
+}
+
+func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} }
+func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
+func (*DefragmentRequest) ProtoMessage() {}
+func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} }
+
+type DefragmentResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} }
+func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
+func (*DefragmentResponse) ProtoMessage() {}
+func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} }
+
+func (m *DefragmentResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type MoveLeaderRequest struct {
+ // targetID is the node ID for the new leader.
+ TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
+}
+
+func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} }
+func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderRequest) ProtoMessage() {}
+func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} }
+
+func (m *MoveLeaderRequest) GetTargetID() uint64 {
+ if m != nil {
+ return m.TargetID
+ }
+ return 0
+}
+
+type MoveLeaderResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} }
+func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderResponse) ProtoMessage() {}
+func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} }
+
+func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AlarmRequest struct {
+ // action is the kind of alarm request to issue. The action
+ // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+ // raised alarm.
+ Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
+ // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+ // alarm request covers all members.
+ MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
+ // alarm is the type of alarm to consider for this request.
+ Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+}
+
+func (m *AlarmRequest) Reset() { *m = AlarmRequest{} }
+func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*AlarmRequest) ProtoMessage() {}
+func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} }
+
+func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
+ if m != nil {
+ return m.Action
+ }
+ return AlarmRequest_GET
+}
+
+func (m *AlarmRequest) GetMemberID() uint64 {
+ if m != nil {
+ return m.MemberID
+ }
+ return 0
+}
+
+func (m *AlarmRequest) GetAlarm() AlarmType {
+ if m != nil {
+ return m.Alarm
+ }
+ return AlarmType_NONE
+}
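+
+// Illustrative sketch (not generated code): per the comments on AlarmRequest,
+// a GET action with MemberID 0 queries the alarm status of every member.
+func exampleAlarmRequest() *AlarmRequest {
+	return &AlarmRequest{
+		Action:   AlarmRequest_GET,
+		MemberID: 0,              // 0 covers all members
+		Alarm:    AlarmType_NONE, // consider all alarm types
+	}
+}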
+
+type AlarmMember struct {
+ // memberID is the ID of the member associated with the raised alarm.
+ MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
+ // alarm is the type of alarm which has been raised.
+ Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+}
+
+func (m *AlarmMember) Reset() { *m = AlarmMember{} }
+func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
+func (*AlarmMember) ProtoMessage() {}
+func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} }
+
+func (m *AlarmMember) GetMemberID() uint64 {
+ if m != nil {
+ return m.MemberID
+ }
+ return 0
+}
+
+func (m *AlarmMember) GetAlarm() AlarmType {
+ if m != nil {
+ return m.Alarm
+ }
+ return AlarmType_NONE
+}
+
+type AlarmResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // alarms is a list of alarms associated with the alarm request.
+ Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"`
+}
+
+func (m *AlarmResponse) Reset() { *m = AlarmResponse{} }
+func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
+func (*AlarmResponse) ProtoMessage() {}
+func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} }
+
+func (m *AlarmResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AlarmResponse) GetAlarms() []*AlarmMember {
+ if m != nil {
+ return m.Alarms
+ }
+ return nil
+}
+
+type StatusRequest struct {
+}
+
+func (m *StatusRequest) Reset() { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage() {}
+func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} }
+
+type StatusResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // version is the cluster protocol version used by the responding member.
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // dbSize is the size of the backend database, in bytes, of the responding member.
+ DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+ // leader is the member ID which the responding member believes is the current leader.
+ Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+ // raftIndex is the current raft index of the responding member.
+ RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+ // raftTerm is the current raft term of the responding member.
+ RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+}
+
+func (m *StatusResponse) Reset() { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage() {}
+func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} }
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+ if m != nil {
+ return m.DbSize
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+ if m != nil {
+ return m.Leader
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+ if m != nil {
+ return m.RaftIndex
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+ if m != nil {
+ return m.RaftTerm
+ }
+ return 0
+}
+
+type AuthEnableRequest struct {
+}
+
+func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage() {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} }
+
+type AuthDisableRequest struct {
+}
+
+func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage() {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} }
+
+type AuthenticateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage() {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} }
+
+func (m *AuthenticateRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserAddRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage() {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} }
+
+func (m *AuthUserAddRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserGetRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage() {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} }
+
+func (m *AuthUserGetRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthUserDeleteRequest struct {
+ // name is the name of the user to delete.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage() {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} }
+
+func (m *AuthUserDeleteRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+ // name is the name of the user whose password is being changed.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // password is the new password for the user.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage() {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{59}
+}
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserGrantRoleRequest struct {
+ // user is the name of the user which should be granted a given role.
+ User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+ // role is the name of the role to grant to the user.
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage() {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} }
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+ if m != nil {
+ return m.User
+ }
+ return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage() {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} }
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthRoleAddRequest struct {
+ // name is the name of the role to add to the authentication system.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage() {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} }
+
+func (m *AuthRoleAddRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthRoleGetRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage() {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} }
+
+func (m *AuthRoleGetRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthUserListRequest struct {
+}
+
+func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage() {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} }
+
+type AuthRoleListRequest struct {
+}
+
+func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage() {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} }
+
+type AuthRoleDeleteRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage() {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} }
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+ // name is the name of the role which will be granted the permission.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // perm is the permission to grant to the role.
+ Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage() {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{67}
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+ if m != nil {
+ return m.Perm
+ }
+ return nil
+}
+
+type AuthRoleRevokePermissionRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+ Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage() {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{68}
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return ""
+}
+
+type AuthEnableResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage() {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} }
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthDisableResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage() {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} }
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthenticateResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // token is an authorized token that can be used in succeeding RPCs
+ Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+}
+
+func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage() {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} }
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+ if m != nil {
+ return m.Token
+ }
+ return ""
+}
+
+type AuthUserAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage() {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} }
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserGetResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage() {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} }
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+ if m != nil {
+ return m.Roles
+ }
+ return nil
+}
+
+type AuthUserDeleteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage() {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} }
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage() {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{75}
+}
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage() {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} }
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage() {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} }
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage() {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} }
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleGetResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"`
+}
+
+func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage() {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{79} }
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+ if m != nil {
+ return m.Perm
+ }
+ return nil
+}
+
+type AuthRoleListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage() {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} }
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+ if m != nil {
+ return m.Roles
+ }
+ return nil
+}
+
+type AuthUserListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"`
+}
+
+func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage() {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{81} }
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+ if m != nil {
+ return m.Users
+ }
+ return nil
+}
+
+type AuthRoleDeleteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage() {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} }
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage() {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{83}
+}
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage() {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{84}
+}
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+ proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+ proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+ proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+ proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+ proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+ proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+ proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+ proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+ proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+ proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+ proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+ proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+ proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+ proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+ proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+ proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+ proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+ proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+ proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+ proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+ proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+ proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+ proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+ proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+ proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+ proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+ proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+ proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+ proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+ proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+ proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+ proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+ proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+ proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+ proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+ proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+ proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+ proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+ proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+ proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+ proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+ proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+ proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+ proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+ proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+ proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+ proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+ proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+ proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+ proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+ proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+ proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+ proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+ proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+ proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+ proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+ proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+ proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+ proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+ proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+ proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+ proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+ proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+ proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+ proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+ proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+ proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+ proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+ proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+ proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+ proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+ proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+ proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+ proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+ proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+ proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+ proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+ proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+ proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+ proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+ proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+ proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+ proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+ proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+ proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+ proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+ proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+ proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+ proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+ proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+ proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for KV service
+
+type KVClient interface {
+ // Range gets the keys in the range from the key-value store.
+ Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
+
+type kVClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+ return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+ out := new(RangeResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+ out := new(PutResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+ out := new(DeleteRangeResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+ out := new(TxnResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+ out := new(CompactionResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for KV service
+
+type KVServer interface {
+ // Range gets the keys in the range from the key-value store.
+ Range(context.Context, *RangeRequest) (*RangeResponse, error)
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ Put(context.Context, *PutRequest) (*PutResponse, error)
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+ s.RegisterService(&_KV_serviceDesc, srv)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Range(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Range",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PutRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Put(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Put",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Put(ctx, req.(*PutRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteRangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).DeleteRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/DeleteRange",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TxnRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Txn(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Txn",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CompactionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Compact(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Compact",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _KV_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.KV",
+ HandlerType: (*KVServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Range",
+ Handler: _KV_Range_Handler,
+ },
+ {
+ MethodName: "Put",
+ Handler: _KV_Put_Handler,
+ },
+ {
+ MethodName: "DeleteRange",
+ Handler: _KV_DeleteRange_Handler,
+ },
+ {
+ MethodName: "Txn",
+ Handler: _KV_Txn_Handler,
+ },
+ {
+ MethodName: "Compact",
+ Handler: _KV_Compact_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
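For reference, a minimal sketch of driving the generated KV client above from application code; the endpoint address and the key/value literals are placeholders, and `pb` aliases the etcdserverpb package:
```
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Dial a single etcd member directly; 127.0.0.1:2379 is illustrative.
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	kv := pb.NewKVClient(conn)

	// Put stores a key and returns the new store revision in the response header.
	putResp, err := kv.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("revision after put:", putResp.Header.Revision)

	// Range reads the key back; a single-key read leaves RangeEnd empty.
	rangeResp, err := kv.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")})
	if err != nil {
		log.Fatal(err)
	}
	for _, kvPair := range rangeResp.Kvs {
		fmt.Printf("%s=%s\n", kvPair.Key, kvPair.Value)
	}
}
```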
+
+// Client API for Watch service
+
+type WatchClient interface {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
+}
+
+type watchClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewWatchClient(cc *grpc.ClientConn) WatchClient {
+ return &watchClient{cc}
+}
+
+func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &watchWatchClient{stream}
+ return x, nil
+}
+
+type Watch_WatchClient interface {
+ Send(*WatchRequest) error
+ Recv() (*WatchResponse, error)
+ grpc.ClientStream
+}
+
+type watchWatchClient struct {
+ grpc.ClientStream
+}
+
+func (x *watchWatchClient) Send(m *WatchRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *watchWatchClient) Recv() (*WatchResponse, error) {
+ m := new(WatchResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// Server API for Watch service
+
+type WatchServer interface {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ Watch(Watch_WatchServer) error
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+ s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(WatchServer).Watch(&watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+ Send(*WatchResponse) error
+ Recv() (*WatchRequest, error)
+ grpc.ServerStream
+}
+
+type watchWatchServer struct {
+ grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *watchWatchServer) Recv() (*WatchRequest, error) {
+ m := new(WatchRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Watch",
+ HandlerType: (*WatchServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Watch",
+ Handler: _Watch_Watch_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
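Likewise, a sketch of the bidirectional Watch stream: the watcher is registered by sending a WatchCreateRequest wrapped in the request union, and events then arrive on the same stream. The key and the helper name are illustrative:
```
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// watchKey opens one Watch stream on conn, registers a watcher on a single
// key and prints events until the stream or the context ends.
func watchKey(ctx context.Context, conn *grpc.ClientConn, key string) error {
	stream, err := pb.NewWatchClient(conn).Watch(ctx)
	if err != nil {
		return err
	}
	// The create request travels over the same stream that later carries events.
	create := &pb.WatchRequest{
		RequestUnion: &pb.WatchRequest_CreateRequest{
			CreateRequest: &pb.WatchCreateRequest{Key: []byte(key)},
		},
	}
	if err := stream.Send(create); err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err
		}
		for _, ev := range resp.Events {
			fmt.Printf("%s %s=%s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```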
+
+// Client API for Lease service
+
+type LeaseClient interface {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+ // LeaseLeases lists all existing leases.
+ LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+ return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+ out := new(LeaseGrantResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+ out := new(LeaseRevokeResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &leaseLeaseKeepAliveClient{stream}
+ return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+ Send(*LeaseKeepAliveRequest) error
+ Recv() (*LeaseKeepAliveResponse, error)
+ grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+ grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+ m := new(LeaseKeepAliveResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+ out := new(LeaseTimeToLiveResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+ out := new(LeaseLeasesResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Lease service
+
+type LeaseServer interface {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+ // LeaseLeases lists all existing leases.
+ LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+ s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseGrantRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseGrant(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseRevokeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseRevoke(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+ Send(*LeaseKeepAliveResponse) error
+ Recv() (*LeaseKeepAliveRequest, error)
+ grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+ grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+ m := new(LeaseKeepAliveRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseTimeToLiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseLeasesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseLeases(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Lease",
+ HandlerType: (*LeaseServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "LeaseGrant",
+ Handler: _Lease_LeaseGrant_Handler,
+ },
+ {
+ MethodName: "LeaseRevoke",
+ Handler: _Lease_LeaseRevoke_Handler,
+ },
+ {
+ MethodName: "LeaseTimeToLive",
+ Handler: _Lease_LeaseTimeToLive_Handler,
+ },
+ {
+ MethodName: "LeaseLeases",
+ Handler: _Lease_LeaseLeases_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "LeaseKeepAlive",
+ Handler: _Lease_LeaseKeepAlive_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
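A sketch of the Lease flow described above, under the assumption that a key should live only as long as its lease: grant a lease, attach a key to it with Put, then refresh it over the LeaseKeepAlive stream. The TTL handling and refresh interval are illustrative choices:
```
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// keepLeaseAlive grants a lease with the given TTL (seconds), attaches one key
// to it and then refreshes the lease until ctx is cancelled.
func keepLeaseAlive(ctx context.Context, conn *grpc.ClientConn, ttl int64, key, val string) error {
	lease := pb.NewLeaseClient(conn)
	grant, err := lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: ttl})
	if err != nil {
		return err
	}
	// Attaching the key to the lease makes it expire together with the lease.
	if _, err := pb.NewKVClient(conn).Put(ctx, &pb.PutRequest{
		Key: []byte(key), Value: []byte(val), Lease: grant.ID,
	}); err != nil {
		return err
	}
	stream, err := lease.LeaseKeepAlive(ctx)
	if err != nil {
		return err
	}
	ticker := time.NewTicker(time.Duration(ttl) * time.Second / 3)
	defer ticker.Stop()
	for {
		if err := stream.Send(&pb.LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
			return err
		}
		if _, err := stream.Recv(); err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
```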
+
+// Client API for Cluster service
+
+type ClusterClient interface {
+ // MemberAdd adds a member into the cluster.
+ MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+ // MemberUpdate updates the member configuration.
+ MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+ // MemberList lists all the members in the cluster.
+ MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+}
+
+type clusterClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+ return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+ out := new(MemberAddResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+ out := new(MemberRemoveResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+ out := new(MemberUpdateResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+ out := new(MemberListResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Cluster service
+
+type ClusterServer interface {
+ // MemberAdd adds a member into the cluster.
+ MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+ // MemberUpdate updates the member configuration.
+ MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+ // MemberList lists all the members in the cluster.
+ MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+ s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberRemoveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberRemove(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberUpdateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberUpdate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Cluster",
+ HandlerType: (*ClusterServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "MemberAdd",
+ Handler: _Cluster_MemberAdd_Handler,
+ },
+ {
+ MethodName: "MemberRemove",
+ Handler: _Cluster_MemberRemove_Handler,
+ },
+ {
+ MethodName: "MemberUpdate",
+ Handler: _Cluster_MemberUpdate_Handler,
+ },
+ {
+ MethodName: "MemberList",
+ Handler: _Cluster_MemberList_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
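The Cluster client reduces to simple unary calls; a sketch of listing members, printing fields taken from the Member message:
```
package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// printMembers lists the cluster members known to the member behind conn.
func printMembers(ctx context.Context, conn *grpc.ClientConn) error {
	resp, err := pb.NewClusterClient(conn).MemberList(ctx, &pb.MemberListRequest{})
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s peers=%v clients=%v\n", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}
	return nil
}
```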
+
+// Client API for Maintenance service
+
+type MaintenanceClient interface {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+ // Status gets the status of the member.
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+ // Defragment defragments a member's backend database to recover storage space.
+ Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+ // Hash computes the hash of the KV's backend.
+ // This is designed for testing; do not use this in production when there
+ // are ongoing transactions.
+ Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
+ MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+}
+
+type maintenanceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+ return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+ out := new(AlarmResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+ out := new(StatusResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+ out := new(DefragmentResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+ out := new(HashResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+ out := new(HashKVResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &maintenanceSnapshotClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Maintenance_SnapshotClient interface {
+ Recv() (*SnapshotResponse, error)
+ grpc.ClientStream
+}
+
+type maintenanceSnapshotClient struct {
+ grpc.ClientStream
+}
+
+func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
+ m := new(SnapshotResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
+ out := new(MoveLeaderResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Maintenance service
+
+type MaintenanceServer interface {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
+ // Status gets the status of the member.
+ Status(context.Context, *StatusRequest) (*StatusResponse, error)
+ // Defragment defragments a member's backend database to recover storage space.
+ Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
+ // Hash computes the hash of the KV's backend.
+ // This is designed for testing; do not use this in production when there
+ // are ongoing transactions.
+ Hash(context.Context, *HashRequest) (*HashResponse, error)
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
+	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
+}
+
+func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
+ s.RegisterService(&_Maintenance_serviceDesc, srv)
+}
+
+func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AlarmRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Alarm(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Alarm",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Status(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Status",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DefragmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Defragment(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Defragment",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Hash(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Hash",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashKVRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).HashKV(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/HashKV",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SnapshotRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
+}
+
+type Maintenance_SnapshotServer interface {
+ Send(*SnapshotResponse) error
+ grpc.ServerStream
+}
+
+type maintenanceSnapshotServer struct {
+ grpc.ServerStream
+}
+
+func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MoveLeaderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).MoveLeader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Maintenance_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Maintenance",
+ HandlerType: (*MaintenanceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Alarm",
+ Handler: _Maintenance_Alarm_Handler,
+ },
+ {
+ MethodName: "Status",
+ Handler: _Maintenance_Status_Handler,
+ },
+ {
+ MethodName: "Defragment",
+ Handler: _Maintenance_Defragment_Handler,
+ },
+ {
+ MethodName: "Hash",
+ Handler: _Maintenance_Hash_Handler,
+ },
+ {
+ MethodName: "HashKV",
+ Handler: _Maintenance_HashKV_Handler,
+ },
+ {
+ MethodName: "MoveLeader",
+ Handler: _Maintenance_MoveLeader_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Snapshot",
+ Handler: _Maintenance_Snapshot_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
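For the server-streaming Snapshot RPC above, a sketch of draining the stream into a local file; the assumption made here is simply that each SnapshotResponse carries the next chunk of the backend blob:
```
package example

import (
	"context"
	"io"
	"os"

	"google.golang.org/grpc"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// saveSnapshot streams a full backend snapshot from the member behind conn
// into the file at path, chunk by chunk.
func saveSnapshot(ctx context.Context, conn *grpc.ClientConn, path string) error {
	stream, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, &pb.SnapshotRequest{})
	if err != nil {
		return err
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// Each response carries the next part of the snapshot blob.
		if _, err := f.Write(resp.Blob); err != nil {
			return err
		}
	}
}
```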
+
+// Client API for Auth service
+
+type AuthClient interface {
+ // AuthEnable enables authentication.
+ AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
+ // AuthDisable disables authentication.
+ AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
+ // Authenticate processes an authenticate request.
+ Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
+ // UserAdd adds a new user.
+ UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+ UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role from a specified user.
+ UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role.
+ RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+ RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
+}
+
+type authClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewAuthClient(cc *grpc.ClientConn) AuthClient {
+ return &authClient{cc}
+}
+
+func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
+ out := new(AuthEnableResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
+ out := new(AuthDisableResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
+ out := new(AuthenticateResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
+ out := new(AuthUserAddResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
+ out := new(AuthUserGetResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
+ out := new(AuthUserListResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
+ out := new(AuthUserDeleteResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
+ out := new(AuthUserChangePasswordResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
+ out := new(AuthUserGrantRoleResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
+ out := new(AuthUserRevokeRoleResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
+ out := new(AuthRoleAddResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
+ out := new(AuthRoleGetResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
+ out := new(AuthRoleListResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
+ out := new(AuthRoleDeleteResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
+ out := new(AuthRoleGrantPermissionResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
+ out := new(AuthRoleRevokePermissionResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Auth service
+
+type AuthServer interface {
+ // AuthEnable enables authentication.
+ AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
+ // AuthDisable disables authentication.
+ AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
+ // Authenticate processes an authenticate request.
+ Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+ // UserAdd adds a new user.
+ UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+ // UserGrantRole grants a role to a specified user.
+ UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+ // UserRevokeRole revokes a role of a specified user.
+ UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role.
+ RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+ // RoleList gets a list of all roles.
+ RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
+}
+
+func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
+ s.RegisterService(&_Auth_serviceDesc, srv)
+}
+
+func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthEnableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthEnable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthEnable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthDisableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthDisable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthDisable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthenticateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).Authenticate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/Authenticate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserChangePasswordRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserChangePassword(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserChangePassword",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserGrantRoleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserGrantRole(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserGrantRole",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserRevokeRoleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserRevokeRole(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleGrantPermissionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleGrantPermission(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleRevokePermissionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleRevokePermission(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Auth_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Auth",
+ HandlerType: (*AuthServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "AuthEnable",
+ Handler: _Auth_AuthEnable_Handler,
+ },
+ {
+ MethodName: "AuthDisable",
+ Handler: _Auth_AuthDisable_Handler,
+ },
+ {
+ MethodName: "Authenticate",
+ Handler: _Auth_Authenticate_Handler,
+ },
+ {
+ MethodName: "UserAdd",
+ Handler: _Auth_UserAdd_Handler,
+ },
+ {
+ MethodName: "UserGet",
+ Handler: _Auth_UserGet_Handler,
+ },
+ {
+ MethodName: "UserList",
+ Handler: _Auth_UserList_Handler,
+ },
+ {
+ MethodName: "UserDelete",
+ Handler: _Auth_UserDelete_Handler,
+ },
+ {
+ MethodName: "UserChangePassword",
+ Handler: _Auth_UserChangePassword_Handler,
+ },
+ {
+ MethodName: "UserGrantRole",
+ Handler: _Auth_UserGrantRole_Handler,
+ },
+ {
+ MethodName: "UserRevokeRole",
+ Handler: _Auth_UserRevokeRole_Handler,
+ },
+ {
+ MethodName: "RoleAdd",
+ Handler: _Auth_RoleAdd_Handler,
+ },
+ {
+ MethodName: "RoleGet",
+ Handler: _Auth_RoleGet_Handler,
+ },
+ {
+ MethodName: "RoleList",
+ Handler: _Auth_RoleList_Handler,
+ },
+ {
+ MethodName: "RoleDelete",
+ Handler: _Auth_RoleDelete_Handler,
+ },
+ {
+ MethodName: "RoleGrantPermission",
+ Handler: _Auth_RoleGrantPermission_Handler,
+ },
+ {
+ MethodName: "RoleRevokePermission",
+ Handler: _Auth_RoleRevokePermission_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
+
+func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterId != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
+ }
+ if m.MemberId != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
+ }
+ if m.Revision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ if m.RaftTerm != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+ }
+ return i, nil
+}
+
+func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ if m.Limit != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
+ }
+ if m.Revision != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ if m.SortOrder != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
+ }
+ if m.SortTarget != 0 {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
+ }
+ if m.Serializable {
+ dAtA[i] = 0x38
+ i++
+ if m.Serializable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.KeysOnly {
+ dAtA[i] = 0x40
+ i++
+ if m.KeysOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.CountOnly {
+ dAtA[i] = 0x48
+ i++
+ if m.CountOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.MinModRevision != 0 {
+ dAtA[i] = 0x50
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
+ }
+ if m.MaxModRevision != 0 {
+ dAtA[i] = 0x58
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
+ }
+ if m.MinCreateRevision != 0 {
+ dAtA[i] = 0x60
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
+ }
+ if m.MaxCreateRevision != 0 {
+ dAtA[i] = 0x68
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
+ }
+ return i, nil
+}
+
+func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n1, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if len(m.Kvs) > 0 {
+ for _, msg := range m.Kvs {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.More {
+ dAtA[i] = 0x18
+ i++
+ if m.More {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.Count != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Count))
+ }
+ return i, nil
+}
+
+func (m *PutRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+ }
+ if m.PrevKv {
+ dAtA[i] = 0x20
+ i++
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.IgnoreValue {
+ dAtA[i] = 0x28
+ i++
+ if m.IgnoreValue {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.IgnoreLease {
+ dAtA[i] = 0x30
+ i++
+ if m.IgnoreLease {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *PutResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n2, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if m.PrevKv != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size()))
+ n3, err := m.PrevKv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ return i, nil
+}
+
+func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ if m.PrevKv {
+ dAtA[i] = 0x18
+ i++
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n4, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ if m.Deleted != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
+ }
+ if len(m.PrevKvs) > 0 {
+ for _, msg := range m.PrevKvs {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *RequestOp) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Request != nil {
+ nn5, err := m.Request.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn5
+ }
+ return i, nil
+}
+
+func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestRange != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size()))
+ n6, err := m.RequestRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestPut != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size()))
+ n7, err := m.RequestPut.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestDeleteRange != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size()))
+ n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
+func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestTxn != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size()))
+ n9, err := m.RequestTxn.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ return i, nil
+}
+func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Response != nil {
+ nn10, err := m.Response.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn10
+ }
+ return i, nil
+}
+
+func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponseRange != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size()))
+ n11, err := m.ResponseRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ }
+ return i, nil
+}
+func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponsePut != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size()))
+ n12, err := m.ResponsePut.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ return i, nil
+}
+func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponseDeleteRange != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size()))
+ n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ return i, nil
+}
+func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponseTxn != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size()))
+ n14, err := m.ResponseTxn.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ return i, nil
+}
+func (m *Compare) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Result != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Result))
+ }
+ if m.Target != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Target))
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if m.TargetUnion != nil {
+ nn15, err := m.TargetUnion.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn15
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x4
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ return i, nil
+}
+
+func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Version))
+ return i, nil
+}
+func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
+ return i, nil
+}
+func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
+ return i, nil
+}
+func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.Value != nil {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ return i, nil
+}
+func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+ return i, nil
+}
+func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Compare) > 0 {
+ for _, msg := range m.Compare {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Success) > 0 {
+ for _, msg := range m.Success {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Failure) > 0 {
+ for _, msg := range m.Failure {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n16, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ if m.Succeeded {
+ dAtA[i] = 0x10
+ i++
+ if m.Succeeded {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if len(m.Responses) > 0 {
+ for _, msg := range m.Responses {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ if m.Physical {
+ dAtA[i] = 0x10
+ i++
+ if m.Physical {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n17, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ return i, nil
+}
+
+func (m *HashRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ return i, nil
+}
+
+func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n18, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ if m.Hash != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+ }
+ if m.CompactRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+ }
+ return i, nil
+}
+
+func (m *HashResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n19, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ }
+ if m.Hash != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+ }
+ return i, nil
+}
+
+func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n20, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ if m.RemainingBytes != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
+ }
+ if len(m.Blob) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
+ i += copy(dAtA[i:], m.Blob)
+ }
+ return i, nil
+}
+
+func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.RequestUnion != nil {
+ nn21, err := m.RequestUnion.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn21
+ }
+ return i, nil
+}
+
+func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.CreateRequest != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size()))
+ n22, err := m.CreateRequest.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ }
+ return i, nil
+}
+func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.CancelRequest != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size()))
+ n23, err := m.CancelRequest.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ }
+ return i, nil
+}
+func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ if m.StartRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
+ }
+ if m.ProgressNotify {
+ dAtA[i] = 0x20
+ i++
+ if m.ProgressNotify {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if len(m.Filters) > 0 {
+ dAtA25 := make([]byte, len(m.Filters)*10)
+ var j24 int
+ for _, num := range m.Filters {
+ for num >= 1<<7 {
+ dAtA25[j24] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j24++
+ }
+ dAtA25[j24] = uint8(num)
+ j24++
+ }
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(j24))
+ i += copy(dAtA[i:], dAtA25[:j24])
+ }
+ if m.PrevKv {
+ dAtA[i] = 0x30
+ i++
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.WatchId != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ }
+ return i, nil
+}
+
+func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n26, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ }
+ if m.WatchId != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ }
+ if m.Created {
+ dAtA[i] = 0x18
+ i++
+ if m.Created {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.Canceled {
+ dAtA[i] = 0x20
+ i++
+ if m.Canceled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.CompactRevision != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+ }
+ if len(m.CancelReason) > 0 {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
+ i += copy(dAtA[i:], m.CancelReason)
+ }
+ if len(m.Events) > 0 {
+ for _, msg := range m.Events {
+ dAtA[i] = 0x5a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.TTL != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n27, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ if len(m.Error) > 0 {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
+ i += copy(dAtA[i:], m.Error)
+ }
+ return i, nil
+}
+
+func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n28, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n28
+ }
+ return i, nil
+}
+
+func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n29, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n29
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ return i, nil
+}
+
+func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.Keys {
+ dAtA[i] = 0x10
+ i++
+ if m.Keys {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n30, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n30
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ if m.GrantedTTL != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
+ }
+ if len(m.Keys) > 0 {
+ for _, b := range m.Keys {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(b)))
+ i += copy(dAtA[i:], b)
+ }
+ }
+ return i, nil
+}
+
+func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n31, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n31
+ }
+ if len(m.Leases) > 0 {
+ for _, msg := range m.Leases {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if len(m.Name) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ if len(m.ClientURLs) > 0 {
+ for _, s := range m.ClientURLs {
+ dAtA[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n32, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n32
+ }
+ if m.Member != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size()))
+ n33, err := m.Member.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n33
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n34, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n34
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n35, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n35
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n36, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n36
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n37, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n37
+ }
+ return i, nil
+}
+
+func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.TargetID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
+ }
+ return i, nil
+}
+
+func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n38, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n38
+ }
+ return i, nil
+}
+
+func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Action != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+ }
+ if m.MemberID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+ }
+ return i, nil
+}
+
+func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.MemberID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+ }
+ return i, nil
+}
+
+func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n39, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n39
+ }
+ if len(m.Alarms) > 0 {
+ for _, msg := range m.Alarms {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n40, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n40
+ }
+ if len(m.Version) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i += copy(dAtA[i:], m.Version)
+ }
+ if m.DbSize != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
+ }
+ if m.Leader != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
+ }
+ if m.RaftIndex != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
+ }
+ if m.RaftTerm != 0 {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+ }
+ return i, nil
+}
+
+func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ return i, nil
+}
+
+func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ return i, nil
+}
+
+func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ return i, nil
+}
+
+func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.User) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
+ i += copy(dAtA[i:], m.User)
+ }
+ if len(m.Role) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Role) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Role) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Role) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if m.Perm != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size()))
+ n41, err := m.Perm.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n41
+ }
+ return i, nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Role) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ return i, nil
+}
+
+func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n42, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n42
+ }
+ return i, nil
+}
+
+func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n43, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n43
+ }
+ return i, nil
+}
+
+func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n44, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n44
+ }
+ if len(m.Token) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
+ i += copy(dAtA[i:], m.Token)
+ }
+ return i, nil
+}
+
+func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n45, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n45
+ }
+ return i, nil
+}
+
+func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n46, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n46
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n47, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n47
+ }
+ return i, nil
+}
+
+func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n48, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n48
+ }
+ return i, nil
+}
+
+func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n49, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n49
+ }
+ return i, nil
+}
+
+func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n50, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n50
+ }
+ return i, nil
+}
+
+func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n51, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n51
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n52, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n52
+ }
+ if len(m.Perm) > 0 {
+ for _, msg := range m.Perm {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n53, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n53
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n54, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n54
+ }
+ if len(m.Users) > 0 {
+ for _, s := range m.Users {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n55, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n55
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n56, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n56
+ }
+ return i, nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n57, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n57
+ }
+ return i, nil
+}
+
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *ResponseHeader) Size() (n int) {
+ var l int
+ _ = l
+ if m.ClusterId != 0 {
+ n += 1 + sovRpc(uint64(m.ClusterId))
+ }
+ if m.MemberId != 0 {
+ n += 1 + sovRpc(uint64(m.MemberId))
+ }
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.RaftTerm != 0 {
+ n += 1 + sovRpc(uint64(m.RaftTerm))
+ }
+ return n
+}
+
+func (m *RangeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Limit != 0 {
+ n += 1 + sovRpc(uint64(m.Limit))
+ }
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.SortOrder != 0 {
+ n += 1 + sovRpc(uint64(m.SortOrder))
+ }
+ if m.SortTarget != 0 {
+ n += 1 + sovRpc(uint64(m.SortTarget))
+ }
+ if m.Serializable {
+ n += 2
+ }
+ if m.KeysOnly {
+ n += 2
+ }
+ if m.CountOnly {
+ n += 2
+ }
+ if m.MinModRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MinModRevision))
+ }
+ if m.MaxModRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MaxModRevision))
+ }
+ if m.MinCreateRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MinCreateRevision))
+ }
+ if m.MaxCreateRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MaxCreateRevision))
+ }
+ return n
+}
+
+func (m *RangeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Kvs) > 0 {
+ for _, e := range m.Kvs {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.More {
+ n += 2
+ }
+ if m.Count != 0 {
+ n += 1 + sovRpc(uint64(m.Count))
+ }
+ return n
+}
+
+func (m *PutRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovRpc(uint64(m.Lease))
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ if m.IgnoreValue {
+ n += 2
+ }
+ if m.IgnoreLease {
+ n += 2
+ }
+ return n
+}
+
+func (m *PutResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.PrevKv != nil {
+ l = m.PrevKv.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *DeleteRangeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ return n
+}
+
+func (m *DeleteRangeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Deleted != 0 {
+ n += 1 + sovRpc(uint64(m.Deleted))
+ }
+ if len(m.PrevKvs) > 0 {
+ for _, e := range m.PrevKvs {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RequestOp) Size() (n int) {
+ var l int
+ _ = l
+ if m.Request != nil {
+ n += m.Request.Size()
+ }
+ return n
+}
+
+func (m *RequestOp_RequestRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestRange != nil {
+ l = m.RequestRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestPut) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestPut != nil {
+ l = m.RequestPut.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestDeleteRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestDeleteRange != nil {
+ l = m.RequestDeleteRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestTxn) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestTxn != nil {
+ l = m.RequestTxn.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp) Size() (n int) {
+ var l int
+ _ = l
+ if m.Response != nil {
+ n += m.Response.Size()
+ }
+ return n
+}
+
+func (m *ResponseOp_ResponseRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponseRange != nil {
+ l = m.ResponseRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponsePut) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponsePut != nil {
+ l = m.ResponsePut.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponseDeleteRange != nil {
+ l = m.ResponseDeleteRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponseTxn) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponseTxn != nil {
+ l = m.ResponseTxn.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *Compare) Size() (n int) {
+ var l int
+ _ = l
+ if m.Result != 0 {
+ n += 1 + sovRpc(uint64(m.Result))
+ }
+ if m.Target != 0 {
+ n += 1 + sovRpc(uint64(m.Target))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.TargetUnion != nil {
+ n += m.TargetUnion.Size()
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 2 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *Compare_Version) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.Version))
+ return n
+}
+func (m *Compare_CreateRevision) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.CreateRevision))
+ return n
+}
+func (m *Compare_ModRevision) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.ModRevision))
+ return n
+}
+func (m *Compare_Value) Size() (n int) {
+ var l int
+ _ = l
+ if m.Value != nil {
+ l = len(m.Value)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *Compare_Lease) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.Lease))
+ return n
+}
+func (m *TxnRequest) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Compare) > 0 {
+ for _, e := range m.Compare {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.Success) > 0 {
+ for _, e := range m.Success {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.Failure) > 0 {
+ for _, e := range m.Failure {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TxnResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Succeeded {
+ n += 2
+ }
+ if len(m.Responses) > 0 {
+ for _, e := range m.Responses {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CompactionRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.Physical {
+ n += 2
+ }
+ return n
+}
+
+func (m *CompactionResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *HashRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *HashKVRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ return n
+}
+
+func (m *HashKVResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Hash != 0 {
+ n += 1 + sovRpc(uint64(m.Hash))
+ }
+ if m.CompactRevision != 0 {
+ n += 1 + sovRpc(uint64(m.CompactRevision))
+ }
+ return n
+}
+
+func (m *HashResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Hash != 0 {
+ n += 1 + sovRpc(uint64(m.Hash))
+ }
+ return n
+}
+
+func (m *SnapshotRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *SnapshotResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.RemainingBytes != 0 {
+ n += 1 + sovRpc(uint64(m.RemainingBytes))
+ }
+ l = len(m.Blob)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *WatchRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestUnion != nil {
+ n += m.RequestUnion.Size()
+ }
+ return n
+}
+
+func (m *WatchRequest_CreateRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.CreateRequest != nil {
+ l = m.CreateRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchRequest_CancelRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.CancelRequest != nil {
+ l = m.CancelRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchCreateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.StartRevision != 0 {
+ n += 1 + sovRpc(uint64(m.StartRevision))
+ }
+ if m.ProgressNotify {
+ n += 2
+ }
+ if len(m.Filters) > 0 {
+ l = 0
+ for _, e := range m.Filters {
+ l += sovRpc(uint64(e))
+ }
+ n += 1 + sovRpc(uint64(l)) + l
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ return n
+}
+
+func (m *WatchCancelRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ return n
+}
+
+func (m *WatchResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ if m.Created {
+ n += 2
+ }
+ if m.Canceled {
+ n += 2
+ }
+ if m.CompactRevision != 0 {
+ n += 1 + sovRpc(uint64(m.CompactRevision))
+ }
+ l = len(m.CancelReason)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Events) > 0 {
+ for _, e := range m.Events {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaseGrantRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseGrantResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ l = len(m.Error)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaseRevokeRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseRevokeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaseKeepAliveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseKeepAliveResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ return n
+}
+
+func (m *LeaseTimeToLiveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.Keys {
+ n += 2
+ }
+ return n
+}
+
+func (m *LeaseTimeToLiveResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.GrantedTTL != 0 {
+ n += 1 + sovRpc(uint64(m.GrantedTTL))
+ }
+ if len(m.Keys) > 0 {
+ for _, b := range m.Keys {
+ l = len(b)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaseLeasesRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *LeaseStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseLeasesResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Leases) > 0 {
+ for _, e := range m.Leases {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Member) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.ClientURLs) > 0 {
+ for _, s := range m.ClientURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberAddRequest) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberAddResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Member != nil {
+ l = m.Member.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberRemoveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *MemberRemoveResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberUpdateRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberUpdateResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberListRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MemberListResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DefragmentRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *DefragmentResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *MoveLeaderRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.TargetID != 0 {
+ n += 1 + sovRpc(uint64(m.TargetID))
+ }
+ return n
+}
+
+func (m *MoveLeaderResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AlarmRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Action != 0 {
+ n += 1 + sovRpc(uint64(m.Action))
+ }
+ if m.MemberID != 0 {
+ n += 1 + sovRpc(uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ n += 1 + sovRpc(uint64(m.Alarm))
+ }
+ return n
+}
+
+func (m *AlarmMember) Size() (n int) {
+ var l int
+ _ = l
+ if m.MemberID != 0 {
+ n += 1 + sovRpc(uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ n += 1 + sovRpc(uint64(m.Alarm))
+ }
+ return n
+}
+
+func (m *AlarmResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Alarms) > 0 {
+ for _, e := range m.Alarms {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.DbSize != 0 {
+ n += 1 + sovRpc(uint64(m.DbSize))
+ }
+ if m.Leader != 0 {
+ n += 1 + sovRpc(uint64(m.Leader))
+ }
+ if m.RaftIndex != 0 {
+ n += 1 + sovRpc(uint64(m.RaftIndex))
+ }
+ if m.RaftTerm != 0 {
+ n += 1 + sovRpc(uint64(m.RaftTerm))
+ }
+ return n
+}
+
+func (m *AuthEnableRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthDisableRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthenticateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserAddRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGetRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserDeleteRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserChangePasswordRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGrantRoleRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.User)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserRevokeRoleRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleAddRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGetRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserListRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthRoleListRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthRoleDeleteRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Perm != nil {
+ l = m.Perm.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthEnableResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthDisableResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthenticateResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Token)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserAddResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGetResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthUserDeleteResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserChangePasswordResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGrantRoleResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserRevokeRoleResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleAddResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGetResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Perm) > 0 {
+ for _, e := range m.Perm {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthRoleListResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthUserListResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Users) > 0 {
+ for _, s := range m.Users {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthRoleDeleteResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func sovRpc(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRpc(x uint64) (n int) {
+ return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ m.ClusterId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ClusterId |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
+ }
+ m.MemberId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberId |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+ }
+ m.RaftTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftTerm |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RangeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
+ }
+ m.SortOrder = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
+ }
+ m.SortTarget = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Serializable = bool(v != 0)
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.KeysOnly = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CountOnly = bool(v != 0)
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
+ }
+ m.MinModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinModRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
+ }
+ m.MaxModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxModRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
+ }
+ m.MinCreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinCreateRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
+ }
+ m.MaxCreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxCreateRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RangeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
+ if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.More = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PutRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IgnoreValue = bool(v != 0)
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IgnoreLease = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PutResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrevKv == nil {
+ m.PrevKv = &mvccpb.KeyValue{}
+ }
+ if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+ }
+ m.Deleted = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Deleted |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
+ if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RequestOp) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RangeRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestRange{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &PutRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestPut{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DeleteRangeRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestDeleteRange{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TxnRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestTxn{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResponseOp) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RangeResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseRange{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &PutResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponsePut{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DeleteRangeResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseDeleteRange{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TxnResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseTxn{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Compare) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Compare: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+ }
+ m.Result = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Result |= (Compare_CompareResult(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ m.Target = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_Version{v}
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_CreateRevision{v}
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_ModRevision{v}
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := make([]byte, postIndex-iNdEx)
+ copy(v, dAtA[iNdEx:postIndex])
+ m.TargetUnion = &Compare_Value{v}
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_Lease{v}
+ case 64:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TxnRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Compare = append(m.Compare, &Compare{})
+ if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Success = append(m.Success, &RequestOp{})
+ if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Failure = append(m.Failure, &RequestOp{})
+ if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TxnResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Succeeded = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Responses = append(m.Responses, &ResponseOp{})
+ if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Physical = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+ }
+ m.CompactRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CompactRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
+ }
+ m.RemainingBytes = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RemainingBytes |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
+ if m.Blob == nil {
+ m.Blob = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchCreateRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_CreateRequest{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchCancelRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_CancelRequest{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
+ }
+ m.StartRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StartRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ProgressNotify = bool(v != 0)
+ case 5:
+ if wireType == 0 {
+ var v WatchCreateRequest_FilterType
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Filters = append(m.Filters, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ for iNdEx < postIndex {
+ var v WatchCreateRequest_FilterType
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Filters = append(m.Filters, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Created = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Canceled = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+ }
+ m.CompactRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CompactRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CancelReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Events = append(m.Events, &mvccpb.Event{})
+ if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Error = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Keys = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
+ }
+ m.GrantedTTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.GrantedTTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
+ copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Leases = append(m.Leases, &LeaseStatus{})
+ if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Member: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Member == nil {
+ m.Member = &Member{}
+ }
+ if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
+ }
+ m.TargetID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TargetID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ m.Action = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+ }
+ m.MemberID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ m.Alarm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Alarm |= (AlarmType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmMember) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+ }
+ m.MemberID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ m.Alarm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Alarm |= (AlarmType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Alarms = append(m.Alarms, &AlarmMember{})
+ if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
+ }
+ m.DbSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSize |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ m.Leader = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Leader |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
+ }
+ m.RaftIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftIndex |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+ }
+ m.RaftTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftTerm |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Perm == nil {
+ m.Perm = &authpb.Permission{}
+ }
+ if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Token = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Perm = append(m.Perm, &authpb.Permission{})
+ if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRpc
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRpc(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) }
+
+var fileDescriptorRpc = []byte{
+ // 3669 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x6f, 0x23, 0xc7,
+ 0x72, 0xd6, 0x90, 0x22, 0x29, 0x16, 0x2f, 0xe2, 0xb6, 0xb4, 0xbb, 0x14, 0x77, 0x57, 0xab, 0xed,
+ 0xbd, 0x69, 0x2f, 0x16, 0x6d, 0xd9, 0xc9, 0xc3, 0x26, 0x30, 0xac, 0x95, 0xe8, 0x95, 0x2c, 0xad,
+ 0x24, 0x8f, 0xa8, 0xb5, 0x03, 0x38, 0x11, 0x46, 0x64, 0x4b, 0x62, 0x44, 0xce, 0x30, 0x33, 0x43,
+ 0xae, 0xb4, 0x31, 0x12, 0xc0, 0x71, 0x82, 0xbc, 0xe4, 0x25, 0x06, 0x82, 0xc4, 0xaf, 0x41, 0x60,
+ 0xf8, 0x07, 0x04, 0xf9, 0x0b, 0x41, 0x5e, 0x12, 0x20, 0x7f, 0xe0, 0xc0, 0xe7, 0xbc, 0x9c, 0x5f,
+ 0x70, 0x2e, 0x4f, 0x07, 0x7d, 0x9b, 0xe9, 0xb9, 0x51, 0xb2, 0x69, 0xfb, 0x45, 0x3b, 0x5d, 0x5d,
+ 0x5d, 0x55, 0x5d, 0xdd, 0x55, 0xd5, 0xfd, 0x35, 0x17, 0xf2, 0x76, 0xbf, 0xb5, 0xd4, 0xb7, 0x2d,
+ 0xd7, 0x42, 0x45, 0xe2, 0xb6, 0xda, 0x0e, 0xb1, 0x87, 0xc4, 0xee, 0x1f, 0xd6, 0x66, 0x8f, 0xad,
+ 0x63, 0x8b, 0x75, 0xd4, 0xe9, 0x17, 0xe7, 0xa9, 0xcd, 0x51, 0x9e, 0x7a, 0x6f, 0xd8, 0x6a, 0xb1,
+ 0x3f, 0xfd, 0xc3, 0xfa, 0xe9, 0x50, 0x74, 0xdd, 0x60, 0x5d, 0xc6, 0xc0, 0x3d, 0x61, 0x7f, 0xfa,
+ 0x87, 0xec, 0x1f, 0xd1, 0x79, 0xf3, 0xd8, 0xb2, 0x8e, 0xbb, 0xa4, 0x6e, 0xf4, 0x3b, 0x75, 0xc3,
+ 0x34, 0x2d, 0xd7, 0x70, 0x3b, 0x96, 0xe9, 0xf0, 0x5e, 0xfc, 0xf7, 0x1a, 0x94, 0x75, 0xe2, 0xf4,
+ 0x2d, 0xd3, 0x21, 0xeb, 0xc4, 0x68, 0x13, 0x1b, 0xdd, 0x02, 0x68, 0x75, 0x07, 0x8e, 0x4b, 0xec,
+ 0x83, 0x4e, 0xbb, 0xaa, 0x2d, 0x68, 0x8b, 0x93, 0x7a, 0x5e, 0x50, 0x36, 0xda, 0xe8, 0x06, 0xe4,
+ 0x7b, 0xa4, 0x77, 0xc8, 0x7b, 0x53, 0xac, 0x77, 0x8a, 0x13, 0x36, 0xda, 0xa8, 0x06, 0x53, 0x36,
+ 0x19, 0x76, 0x9c, 0x8e, 0x65, 0x56, 0xd3, 0x0b, 0xda, 0x62, 0x5a, 0xf7, 0xda, 0x74, 0xa0, 0x6d,
+ 0x1c, 0xb9, 0x07, 0x2e, 0xb1, 0x7b, 0xd5, 0x49, 0x3e, 0x90, 0x12, 0x9a, 0xc4, 0xee, 0xe1, 0x2f,
+ 0x33, 0x50, 0xd4, 0x0d, 0xf3, 0x98, 0xe8, 0xe4, 0xaf, 0x06, 0xc4, 0x71, 0x51, 0x05, 0xd2, 0xa7,
+ 0xe4, 0x9c, 0xa9, 0x2f, 0xea, 0xf4, 0x93, 0x8f, 0x37, 0x8f, 0xc9, 0x01, 0x31, 0xb9, 0xe2, 0x22,
+ 0x1d, 0x6f, 0x1e, 0x93, 0x86, 0xd9, 0x46, 0xb3, 0x90, 0xe9, 0x76, 0x7a, 0x1d, 0x57, 0x68, 0xe5,
+ 0x8d, 0x80, 0x39, 0x93, 0x21, 0x73, 0x56, 0x01, 0x1c, 0xcb, 0x76, 0x0f, 0x2c, 0xbb, 0x4d, 0xec,
+ 0x6a, 0x66, 0x41, 0x5b, 0x2c, 0x2f, 0xdf, 0x5b, 0x52, 0x17, 0x62, 0x49, 0x35, 0x68, 0x69, 0xcf,
+ 0xb2, 0xdd, 0x1d, 0xca, 0xab, 0xe7, 0x1d, 0xf9, 0x89, 0x3e, 0x84, 0x02, 0x13, 0xe2, 0x1a, 0xf6,
+ 0x31, 0x71, 0xab, 0x59, 0x26, 0xe5, 0xfe, 0x05, 0x52, 0x9a, 0x8c, 0x59, 0x67, 0xea, 0xf9, 0x37,
+ 0xc2, 0x50, 0x74, 0x88, 0xdd, 0x31, 0xba, 0x9d, 0x37, 0xc6, 0x61, 0x97, 0x54, 0x73, 0x0b, 0xda,
+ 0xe2, 0x94, 0x1e, 0xa0, 0xd1, 0xf9, 0x9f, 0x92, 0x73, 0xe7, 0xc0, 0x32, 0xbb, 0xe7, 0xd5, 0x29,
+ 0xc6, 0x30, 0x45, 0x09, 0x3b, 0x66, 0xf7, 0x9c, 0x2d, 0x9a, 0x35, 0x30, 0x5d, 0xde, 0x9b, 0x67,
+ 0xbd, 0x79, 0x46, 0x61, 0xdd, 0x8b, 0x50, 0xe9, 0x75, 0xcc, 0x83, 0x9e, 0xd5, 0x3e, 0xf0, 0x1c,
+ 0x02, 0xcc, 0x21, 0xe5, 0x5e, 0xc7, 0x7c, 0x69, 0xb5, 0x75, 0xe9, 0x16, 0xca, 0x69, 0x9c, 0x05,
+ 0x39, 0x0b, 0x82, 0xd3, 0x38, 0x53, 0x39, 0x97, 0x60, 0x86, 0xca, 0x6c, 0xd9, 0xc4, 0x70, 0x89,
+ 0xcf, 0x5c, 0x64, 0xcc, 0x57, 0x7a, 0x1d, 0x73, 0x95, 0xf5, 0x04, 0xf8, 0x8d, 0xb3, 0x08, 0x7f,
+ 0x49, 0xf0, 0x1b, 0x67, 0x41, 0x7e, 0xbc, 0x04, 0x79, 0xcf, 0xe7, 0x68, 0x0a, 0x26, 0xb7, 0x77,
+ 0xb6, 0x1b, 0x95, 0x09, 0x04, 0x90, 0x5d, 0xd9, 0x5b, 0x6d, 0x6c, 0xaf, 0x55, 0x34, 0x54, 0x80,
+ 0xdc, 0x5a, 0x83, 0x37, 0x52, 0xf8, 0x39, 0x80, 0xef, 0x5d, 0x94, 0x83, 0xf4, 0x66, 0xe3, 0xcf,
+ 0x2a, 0x13, 0x94, 0xe7, 0x55, 0x43, 0xdf, 0xdb, 0xd8, 0xd9, 0xae, 0x68, 0x74, 0xf0, 0xaa, 0xde,
+ 0x58, 0x69, 0x36, 0x2a, 0x29, 0xca, 0xf1, 0x72, 0x67, 0xad, 0x92, 0x46, 0x79, 0xc8, 0xbc, 0x5a,
+ 0xd9, 0xda, 0x6f, 0x54, 0x26, 0xf1, 0x57, 0x1a, 0x94, 0xc4, 0x7a, 0xf1, 0x98, 0x40, 0xef, 0x41,
+ 0xf6, 0x84, 0xc5, 0x05, 0xdb, 0x8a, 0x85, 0xe5, 0x9b, 0xa1, 0xc5, 0x0d, 0xc4, 0x8e, 0x2e, 0x78,
+ 0x11, 0x86, 0xf4, 0xe9, 0xd0, 0xa9, 0xa6, 0x16, 0xd2, 0x8b, 0x85, 0xe5, 0xca, 0x12, 0x0f, 0xd8,
+ 0xa5, 0x4d, 0x72, 0xfe, 0xca, 0xe8, 0x0e, 0x88, 0x4e, 0x3b, 0x11, 0x82, 0xc9, 0x9e, 0x65, 0x13,
+ 0xb6, 0x63, 0xa7, 0x74, 0xf6, 0x4d, 0xb7, 0x31, 0x5b, 0x34, 0xb1, 0x5b, 0x79, 0x03, 0x7f, 0xab,
+ 0x01, 0xec, 0x0e, 0xdc, 0xe4, 0xd0, 0x98, 0x85, 0xcc, 0x90, 0x0a, 0x16, 0x61, 0xc1, 0x1b, 0x2c,
+ 0x26, 0x88, 0xe1, 0x10, 0x2f, 0x26, 0x68, 0x03, 0x5d, 0x87, 0x5c, 0xdf, 0x26, 0xc3, 0x83, 0xd3,
+ 0x21, 0x53, 0x32, 0xa5, 0x67, 0x69, 0x73, 0x73, 0x88, 0xee, 0x40, 0xb1, 0x73, 0x6c, 0x5a, 0x36,
+ 0x39, 0xe0, 0xb2, 0x32, 0xac, 0xb7, 0xc0, 0x69, 0xcc, 0x6e, 0x85, 0x85, 0x0b, 0xce, 0xaa, 0x2c,
+ 0x5b, 0x94, 0x84, 0x4d, 0x28, 0x30, 0x53, 0xc7, 0x72, 0xdf, 0x23, 0xdf, 0xc6, 0x14, 0x1b, 0x16,
+ 0x75, 0xa1, 0xb0, 0x1a, 0x7f, 0x06, 0x68, 0x8d, 0x74, 0x89, 0x4b, 0xc6, 0xc9, 0x1e, 0x8a, 0x4f,
+ 0xd2, 0xaa, 0x4f, 0xf0, 0x3f, 0x6b, 0x30, 0x13, 0x10, 0x3f, 0xd6, 0xb4, 0xaa, 0x90, 0x6b, 0x33,
+ 0x61, 0xdc, 0x82, 0xb4, 0x2e, 0x9b, 0xe8, 0x09, 0x4c, 0x09, 0x03, 0x9c, 0x6a, 0x3a, 0x61, 0xd3,
+ 0xe4, 0xb8, 0x4d, 0x0e, 0xfe, 0x36, 0x05, 0x79, 0x31, 0xd1, 0x9d, 0x3e, 0x5a, 0x81, 0x92, 0xcd,
+ 0x1b, 0x07, 0x6c, 0x3e, 0xc2, 0xa2, 0x5a, 0x72, 0x12, 0x5a, 0x9f, 0xd0, 0x8b, 0x62, 0x08, 0x23,
+ 0xa3, 0x3f, 0x81, 0x82, 0x14, 0xd1, 0x1f, 0xb8, 0xc2, 0xe5, 0xd5, 0xa0, 0x00, 0x7f, 0xff, 0xad,
+ 0x4f, 0xe8, 0x20, 0xd8, 0x77, 0x07, 0x2e, 0x6a, 0xc2, 0xac, 0x1c, 0xcc, 0x67, 0x23, 0xcc, 0x48,
+ 0x33, 0x29, 0x0b, 0x41, 0x29, 0xd1, 0xa5, 0x5a, 0x9f, 0xd0, 0x91, 0x18, 0xaf, 0x74, 0xaa, 0x26,
+ 0xb9, 0x67, 0x3c, 0x79, 0x47, 0x4c, 0x6a, 0x9e, 0x99, 0x51, 0x93, 0x9a, 0x67, 0xe6, 0xf3, 0x3c,
+ 0xe4, 0x44, 0x0b, 0xff, 0x57, 0x0a, 0x40, 0xae, 0xc6, 0x4e, 0x1f, 0xad, 0x41, 0xd9, 0x16, 0xad,
+ 0x80, 0xb7, 0x6e, 0xc4, 0x7a, 0x4b, 0x2c, 0xe2, 0x84, 0x5e, 0x92, 0x83, 0xb8, 0x71, 0xef, 0x43,
+ 0xd1, 0x93, 0xe2, 0x3b, 0x6c, 0x2e, 0xc6, 0x61, 0x9e, 0x84, 0x82, 0x1c, 0x40, 0x5d, 0xf6, 0x09,
+ 0x5c, 0xf5, 0xc6, 0xc7, 0xf8, 0xec, 0xce, 0x08, 0x9f, 0x79, 0x02, 0x67, 0xa4, 0x04, 0xd5, 0x6b,
+ 0xaa, 0x61, 0xbe, 0xdb, 0xe6, 0x62, 0xdc, 0x16, 0x35, 0x8c, 0x3a, 0x0e, 0x68, 0xbd, 0xe4, 0x4d,
+ 0xfc, 0xeb, 0x34, 0xe4, 0x56, 0xad, 0x5e, 0xdf, 0xb0, 0xe9, 0x6a, 0x64, 0x6d, 0xe2, 0x0c, 0xba,
+ 0x2e, 0x73, 0x57, 0x79, 0xf9, 0x6e, 0x50, 0xa2, 0x60, 0x93, 0xff, 0xea, 0x8c, 0x55, 0x17, 0x43,
+ 0xe8, 0x60, 0x51, 0x1e, 0x53, 0x97, 0x18, 0x2c, 0x8a, 0xa3, 0x18, 0x22, 0x03, 0x39, 0xed, 0x07,
+ 0x72, 0x0d, 0x72, 0x43, 0x62, 0xfb, 0x25, 0x7d, 0x7d, 0x42, 0x97, 0x04, 0xf4, 0x08, 0xa6, 0xc3,
+ 0xe5, 0x25, 0x23, 0x78, 0xca, 0xad, 0x60, 0x35, 0xba, 0x0b, 0xc5, 0x40, 0x8d, 0xcb, 0x0a, 0xbe,
+ 0x42, 0x4f, 0x29, 0x71, 0xd7, 0x64, 0x5e, 0xa5, 0xf5, 0xb8, 0xb8, 0x3e, 0x21, 0x33, 0xeb, 0x35,
+ 0x99, 0x59, 0xa7, 0xc4, 0x28, 0x91, 0x5b, 0x03, 0x49, 0xe6, 0x83, 0x60, 0x92, 0xc1, 0x1f, 0x40,
+ 0x29, 0xe0, 0x20, 0x5a, 0x77, 0x1a, 0x1f, 0xef, 0xaf, 0x6c, 0xf1, 0x22, 0xf5, 0x82, 0xd5, 0x25,
+ 0xbd, 0xa2, 0xd1, 0x5a, 0xb7, 0xd5, 0xd8, 0xdb, 0xab, 0xa4, 0x50, 0x09, 0xf2, 0xdb, 0x3b, 0xcd,
+ 0x03, 0xce, 0x95, 0xc6, 0x2f, 0x3c, 0x09, 0xa2, 0xc8, 0x29, 0xb5, 0x6d, 0x42, 0xa9, 0x6d, 0x9a,
+ 0xac, 0x6d, 0x29, 0xbf, 0xb6, 0xb1, 0x32, 0xb7, 0xd5, 0x58, 0xd9, 0x6b, 0x54, 0x26, 0x9f, 0x97,
+ 0xa1, 0xc8, 0xfd, 0x7b, 0x30, 0x30, 0x69, 0xa9, 0xfd, 0x77, 0x0d, 0xc0, 0x8f, 0x26, 0x54, 0x87,
+ 0x5c, 0x8b, 0xeb, 0xa9, 0x6a, 0x2c, 0x19, 0x5d, 0x8d, 0x5d, 0x32, 0x5d, 0x72, 0xa1, 0x77, 0x20,
+ 0xe7, 0x0c, 0x5a, 0x2d, 0xe2, 0xc8, 0x92, 0x77, 0x3d, 0x9c, 0x0f, 0x45, 0xb6, 0xd2, 0x25, 0x1f,
+ 0x1d, 0x72, 0x64, 0x74, 0xba, 0x03, 0x56, 0x00, 0x47, 0x0f, 0x11, 0x7c, 0xf8, 0xdf, 0x34, 0x28,
+ 0x28, 0x9b, 0xf7, 0x07, 0x26, 0xe1, 0x9b, 0x90, 0x67, 0x36, 0x90, 0xb6, 0x48, 0xc3, 0x53, 0xba,
+ 0x4f, 0x40, 0x7f, 0x0c, 0x79, 0x19, 0x01, 0x32, 0x13, 0x57, 0xe3, 0xc5, 0xee, 0xf4, 0x75, 0x9f,
+ 0x15, 0x6f, 0xc2, 0x15, 0xe6, 0x95, 0x16, 0x3d, 0x5c, 0x4b, 0x3f, 0xaa, 0xc7, 0x4f, 0x2d, 0x74,
+ 0xfc, 0xac, 0xc1, 0x54, 0xff, 0xe4, 0xdc, 0xe9, 0xb4, 0x8c, 0xae, 0xb0, 0xc2, 0x6b, 0xe3, 0x8f,
+ 0x00, 0xa9, 0xc2, 0xc6, 0x99, 0x2e, 0x2e, 0x41, 0x61, 0xdd, 0x70, 0x4e, 0x84, 0x49, 0xf8, 0x09,
+ 0x94, 0x68, 0x73, 0xf3, 0xd5, 0x25, 0x6c, 0x64, 0x97, 0x03, 0xc9, 0x3d, 0x96, 0xcf, 0x11, 0x4c,
+ 0x9e, 0x18, 0xce, 0x09, 0x9b, 0x68, 0x49, 0x67, 0xdf, 0xe8, 0x11, 0x54, 0x5a, 0x7c, 0x92, 0x07,
+ 0xa1, 0x2b, 0xc3, 0xb4, 0xa0, 0x7b, 0x27, 0xc1, 0x4f, 0xa1, 0xc8, 0xe7, 0xf0, 0x63, 0x1b, 0x81,
+ 0xaf, 0xc0, 0xf4, 0x9e, 0x69, 0xf4, 0x9d, 0x13, 0x4b, 0x56, 0x37, 0x3a, 0xe9, 0x8a, 0x4f, 0x1b,
+ 0x4b, 0xe3, 0x43, 0x98, 0xb6, 0x49, 0xcf, 0xe8, 0x98, 0x1d, 0xf3, 0xf8, 0xe0, 0xf0, 0xdc, 0x25,
+ 0x8e, 0xb8, 0x30, 0x95, 0x3d, 0xf2, 0x73, 0x4a, 0xa5, 0xa6, 0x1d, 0x76, 0xad, 0x43, 0x91, 0xe6,
+ 0xd8, 0x37, 0xfe, 0x4f, 0x0d, 0x8a, 0x9f, 0x18, 0x6e, 0x4b, 0x2e, 0x1d, 0xda, 0x80, 0xb2, 0x97,
+ 0xdc, 0x18, 0x45, 0xd8, 0x12, 0x2a, 0xb1, 0x6c, 0x8c, 0x3c, 0x4a, 0xcb, 0xea, 0x58, 0x6a, 0xa9,
+ 0x04, 0x26, 0xca, 0x30, 0x5b, 0xa4, 0xeb, 0x89, 0x4a, 0x25, 0x8b, 0x62, 0x8c, 0xaa, 0x28, 0x95,
+ 0xf0, 0x7c, 0xda, 0x3f, 0x7e, 0xf0, 0x5c, 0xf2, 0x75, 0x0a, 0x50, 0xd4, 0x86, 0xef, 0x7b, 0x22,
+ 0xbb, 0x0f, 0x65, 0xc7, 0x35, 0xec, 0xc8, 0xde, 0x28, 0x31, 0xaa, 0x97, 0xa0, 0x1f, 0xc2, 0x74,
+ 0xdf, 0xb6, 0x8e, 0x6d, 0xe2, 0x38, 0x07, 0xa6, 0xe5, 0x76, 0x8e, 0xce, 0xc5, 0xa1, 0xb6, 0x2c,
+ 0xc9, 0xdb, 0x8c, 0x8a, 0x1a, 0x90, 0x3b, 0xea, 0x74, 0x5d, 0x62, 0x3b, 0xd5, 0xcc, 0x42, 0x7a,
+ 0xb1, 0xbc, 0xfc, 0xe4, 0x22, 0xaf, 0x2d, 0x7d, 0xc8, 0xf8, 0x9b, 0xe7, 0x7d, 0xa2, 0xcb, 0xb1,
+ 0xea, 0x41, 0x31, 0x1b, 0x38, 0x28, 0xde, 0x07, 0xf0, 0xf9, 0x69, 0xaa, 0xdd, 0xde, 0xd9, 0xdd,
+ 0x6f, 0x56, 0x26, 0x50, 0x11, 0xa6, 0xb6, 0x77, 0xd6, 0x1a, 0x5b, 0x0d, 0x9a, 0x97, 0x71, 0x5d,
+ 0xfa, 0x46, 0xf5, 0x21, 0x9a, 0x83, 0xa9, 0xd7, 0x94, 0x2a, 0xef, 0xdb, 0x69, 0x3d, 0xc7, 0xda,
+ 0x1b, 0x6d, 0xfc, 0x4f, 0x29, 0x28, 0x89, 0x5d, 0x30, 0xd6, 0x56, 0x54, 0x55, 0xa4, 0x02, 0x2a,
+ 0xe8, 0xa9, 0x94, 0xef, 0x8e, 0xb6, 0x38, 0xfc, 0xca, 0x26, 0xcd, 0x0d, 0x7c, 0xb1, 0x49, 0x5b,
+ 0xb8, 0xd5, 0x6b, 0xc7, 0x86, 0x6f, 0x26, 0x36, 0x7c, 0xd1, 0x5d, 0x28, 0x79, 0xbb, 0xcd, 0x70,
+ 0x44, 0xad, 0xcd, 0xeb, 0x45, 0xb9, 0x91, 0x28, 0x0d, 0xdd, 0x87, 0x2c, 0x19, 0x12, 0xd3, 0x75,
+ 0xaa, 0x05, 0x96, 0x75, 0x4b, 0xf2, 0xfc, 0xdb, 0xa0, 0x54, 0x5d, 0x74, 0xe2, 0x3f, 0x82, 0x2b,
+ 0xec, 0x9e, 0xf1, 0xc2, 0x36, 0x4c, 0xf5, 0x42, 0xd4, 0x6c, 0x6e, 0x09, 0xd7, 0xd1, 0x4f, 0x54,
+ 0x86, 0xd4, 0xc6, 0x9a, 0x98, 0x68, 0x6a, 0x63, 0x0d, 0x7f, 0xa1, 0x01, 0x52, 0xc7, 0x8d, 0xe5,
+ 0xcb, 0x90, 0x70, 0xa9, 0x3e, 0xed, 0xab, 0x9f, 0x85, 0x0c, 0xb1, 0x6d, 0xcb, 0x66, 0x5e, 0xcb,
+ 0xeb, 0xbc, 0x81, 0xef, 0x09, 0x1b, 0x74, 0x32, 0xb4, 0x4e, 0xbd, 0xc0, 0xe0, 0xd2, 0x34, 0xcf,
+ 0xd4, 0x4d, 0x98, 0x09, 0x70, 0x8d, 0x95, 0xfd, 0x1f, 0xc2, 0x55, 0x26, 0x6c, 0x93, 0x90, 0xfe,
+ 0x4a, 0xb7, 0x33, 0x4c, 0xd4, 0xda, 0x87, 0x6b, 0x61, 0xc6, 0x9f, 0xd6, 0x47, 0xf8, 0x4f, 0x85,
+ 0xc6, 0x66, 0xa7, 0x47, 0x9a, 0xd6, 0x56, 0xb2, 0x6d, 0x34, 0x3b, 0x9e, 0x92, 0x73, 0x47, 0x94,
+ 0x49, 0xf6, 0x8d, 0xff, 0x43, 0x83, 0xeb, 0x91, 0xe1, 0x3f, 0xf1, 0xaa, 0xce, 0x03, 0x1c, 0xd3,
+ 0xed, 0x43, 0xda, 0xb4, 0x83, 0xdf, 0xd0, 0x15, 0x8a, 0x67, 0x27, 0x4d, 0x30, 0x45, 0x61, 0xe7,
+ 0xac, 0x58, 0x73, 0xf6, 0xc7, 0x91, 0x35, 0xe6, 0x16, 0x14, 0x18, 0x61, 0xcf, 0x35, 0xdc, 0x81,
+ 0x13, 0x59, 0x8c, 0xbf, 0x11, 0x5b, 0x40, 0x0e, 0x1a, 0x6b, 0x5e, 0xef, 0x40, 0x96, 0x1d, 0x4e,
+ 0xe5, 0xd1, 0x2c, 0x74, 0x1b, 0x50, 0xec, 0xd0, 0x05, 0x23, 0x3e, 0x81, 0xec, 0x4b, 0x86, 0xe8,
+ 0x29, 0x96, 0x4d, 0xca, 0xa5, 0x30, 0x8d, 0x1e, 0xc7, 0x19, 0xf2, 0x3a, 0xfb, 0x66, 0x27, 0x19,
+ 0x42, 0xec, 0x7d, 0x7d, 0x8b, 0x9f, 0x98, 0xf2, 0xba, 0xd7, 0xa6, 0x2e, 0x6b, 0x75, 0x3b, 0xc4,
+ 0x74, 0x59, 0xef, 0x24, 0xeb, 0x55, 0x28, 0x78, 0x09, 0x2a, 0x5c, 0xd3, 0x4a, 0xbb, 0xad, 0x9c,
+ 0x48, 0x3c, 0x79, 0x5a, 0x50, 0x1e, 0xfe, 0x46, 0x83, 0x2b, 0xca, 0x80, 0xb1, 0x1c, 0xf3, 0x14,
+ 0xb2, 0x1c, 0xb7, 0x14, 0xc5, 0x6f, 0x36, 0x38, 0x8a, 0xab, 0xd1, 0x05, 0x0f, 0x5a, 0x82, 0x1c,
+ 0xff, 0x92, 0xc7, 0xc2, 0x78, 0x76, 0xc9, 0x84, 0xef, 0xc3, 0x8c, 0x20, 0x91, 0x9e, 0x15, 0xb7,
+ 0xb7, 0x99, 0x43, 0xf1, 0xe7, 0x30, 0x1b, 0x64, 0x1b, 0x6b, 0x4a, 0x8a, 0x91, 0xa9, 0xcb, 0x18,
+ 0xb9, 0x22, 0x8d, 0xdc, 0xef, 0xb7, 0x95, 0x5a, 0x1d, 0x5e, 0x75, 0x75, 0x45, 0x52, 0xa1, 0x15,
+ 0xf1, 0x26, 0x20, 0x45, 0xfc, 0xac, 0x13, 0x98, 0x91, 0xdb, 0x61, 0xab, 0xe3, 0x78, 0x27, 0xb8,
+ 0x37, 0x80, 0x54, 0xe2, 0xcf, 0x6d, 0xd0, 0x1a, 0x39, 0xb2, 0x8d, 0xe3, 0x1e, 0xf1, 0xea, 0x13,
+ 0x3d, 0xcf, 0xab, 0xc4, 0xb1, 0x32, 0x7a, 0x1d, 0xae, 0xbc, 0xb4, 0x86, 0x34, 0x35, 0x50, 0xaa,
+ 0x1f, 0x32, 0xfc, 0x3e, 0xe7, 0x2d, 0x9b, 0xd7, 0xa6, 0xca, 0xd5, 0x01, 0x63, 0x29, 0xff, 0x5f,
+ 0x0d, 0x8a, 0x2b, 0x5d, 0xc3, 0xee, 0x49, 0xc5, 0xef, 0x43, 0x96, 0xdf, 0x52, 0x04, 0x30, 0xf0,
+ 0x20, 0x28, 0x46, 0xe5, 0xe5, 0x8d, 0x15, 0x7e, 0xa7, 0x11, 0xa3, 0xa8, 0xe1, 0xe2, 0xed, 0x60,
+ 0x2d, 0xf4, 0x96, 0xb0, 0x86, 0xde, 0x82, 0x8c, 0x41, 0x87, 0xb0, 0x14, 0x5c, 0x0e, 0xdf, 0x0f,
+ 0x99, 0x34, 0x76, 0x38, 0xe3, 0x5c, 0xf8, 0x3d, 0x28, 0x28, 0x1a, 0xe8, 0x0d, 0xf8, 0x45, 0x43,
+ 0x1c, 0xc0, 0x56, 0x56, 0x9b, 0x1b, 0xaf, 0xf8, 0xc5, 0xb8, 0x0c, 0xb0, 0xd6, 0xf0, 0xda, 0x29,
+ 0xfc, 0xa9, 0x18, 0x25, 0xf2, 0x9d, 0x6a, 0x8f, 0x96, 0x64, 0x4f, 0xea, 0x52, 0xf6, 0x9c, 0x41,
+ 0x49, 0x4c, 0x7f, 0xdc, 0xf4, 0xcd, 0xe4, 0x25, 0xa4, 0x6f, 0xc5, 0x78, 0x5d, 0x30, 0xe2, 0x69,
+ 0x28, 0x89, 0x84, 0x2e, 0xf6, 0xdf, 0xff, 0x68, 0x50, 0x96, 0x94, 0x71, 0x01, 0x4c, 0x89, 0xbd,
+ 0xf0, 0x0a, 0xe0, 0x21, 0x2f, 0xd7, 0x20, 0xdb, 0x3e, 0xdc, 0xeb, 0xbc, 0x91, 0x60, 0xb3, 0x68,
+ 0x51, 0x7a, 0x97, 0xeb, 0xe1, 0x2f, 0x3e, 0xa2, 0x45, 0x6f, 0xe1, 0xb6, 0x71, 0xe4, 0x6e, 0x98,
+ 0x6d, 0x72, 0xc6, 0xce, 0x8d, 0x93, 0xba, 0x4f, 0x60, 0x97, 0x52, 0xf1, 0x32, 0xc4, 0x0e, 0x8b,
+ 0xea, 0x4b, 0xd1, 0x0c, 0x5c, 0x59, 0x19, 0xb8, 0x27, 0x0d, 0xd3, 0x38, 0xec, 0xca, 0x8c, 0x45,
+ 0xcb, 0x2c, 0x25, 0xae, 0x75, 0x1c, 0x95, 0xda, 0x80, 0x19, 0x4a, 0x25, 0xa6, 0xdb, 0x69, 0x29,
+ 0xe9, 0x4d, 0x16, 0x31, 0x2d, 0x54, 0xc4, 0x0c, 0xc7, 0x79, 0x6d, 0xd9, 0x6d, 0x31, 0x35, 0xaf,
+ 0x8d, 0xd7, 0xb8, 0xf0, 0x7d, 0x27, 0x50, 0xa6, 0xbe, 0xaf, 0x94, 0x45, 0x5f, 0xca, 0x0b, 0xe2,
+ 0x8e, 0x90, 0x82, 0x9f, 0xc0, 0x55, 0xc9, 0x29, 0xc0, 0xbd, 0x11, 0xcc, 0x3b, 0x70, 0x4b, 0x32,
+ 0xaf, 0x9e, 0xd0, 0xdb, 0xd3, 0xae, 0x50, 0xf8, 0x43, 0xed, 0x7c, 0x0e, 0x55, 0xcf, 0x4e, 0x76,
+ 0x58, 0xb6, 0xba, 0xaa, 0x01, 0x03, 0x47, 0xec, 0x99, 0xbc, 0xce, 0xbe, 0x29, 0xcd, 0xb6, 0xba,
+ 0xde, 0x91, 0x80, 0x7e, 0xe3, 0x55, 0x98, 0x93, 0x32, 0xc4, 0x31, 0x36, 0x28, 0x24, 0x62, 0x50,
+ 0x9c, 0x10, 0xe1, 0x30, 0x3a, 0x74, 0xb4, 0xdb, 0x55, 0xce, 0xa0, 0x6b, 0x99, 0x4c, 0x4d, 0x91,
+ 0x79, 0x95, 0xef, 0x08, 0x6a, 0x98, 0x5a, 0x31, 0x04, 0x99, 0x0a, 0x50, 0xc9, 0x62, 0x21, 0x28,
+ 0x39, 0xb2, 0x10, 0x11, 0xd1, 0x9f, 0xc1, 0xbc, 0x67, 0x04, 0xf5, 0xdb, 0x2e, 0xb1, 0x7b, 0x1d,
+ 0xc7, 0x51, 0xe0, 0xa0, 0xb8, 0x89, 0x3f, 0x80, 0xc9, 0x3e, 0x11, 0x39, 0xa5, 0xb0, 0x8c, 0x96,
+ 0xf8, 0xfb, 0xed, 0x92, 0x32, 0x98, 0xf5, 0xe3, 0x36, 0xdc, 0x96, 0xd2, 0xb9, 0x47, 0x63, 0xc5,
+ 0x87, 0x8d, 0x92, 0xb7, 0x6e, 0xee, 0xd6, 0xe8, 0xad, 0x3b, 0xcd, 0xd7, 0xde, 0x83, 0x28, 0x3f,
+ 0xe2, 0x8e, 0x94, 0xb1, 0x35, 0x56, 0xad, 0xd8, 0xe4, 0x3e, 0xf5, 0x42, 0x72, 0x2c, 0x61, 0x87,
+ 0x30, 0x1b, 0x8c, 0xe4, 0xb1, 0xd2, 0xd8, 0x2c, 0x64, 0x5c, 0xeb, 0x94, 0xc8, 0x24, 0xc6, 0x1b,
+ 0xd2, 0x60, 0x2f, 0xcc, 0xc7, 0x32, 0xd8, 0xf0, 0x85, 0xb1, 0x2d, 0x39, 0xae, 0xbd, 0x74, 0x35,
+ 0xe5, 0xe1, 0x8b, 0x37, 0xf0, 0x36, 0x5c, 0x0b, 0xa7, 0x89, 0xb1, 0x4c, 0x7e, 0xc5, 0x37, 0x70,
+ 0x5c, 0x26, 0x19, 0x4b, 0xee, 0xc7, 0x7e, 0x32, 0x50, 0x12, 0xca, 0x58, 0x22, 0x75, 0xa8, 0xc5,
+ 0xe5, 0x97, 0x1f, 0x63, 0xbf, 0x7a, 0xe9, 0x66, 0x2c, 0x61, 0x8e, 0x2f, 0x6c, 0xfc, 0xe5, 0xf7,
+ 0x73, 0x44, 0x7a, 0x64, 0x8e, 0x10, 0x41, 0xe2, 0x67, 0xb1, 0x9f, 0x60, 0xd3, 0x09, 0x1d, 0x7e,
+ 0x02, 0x1d, 0x57, 0x07, 0xad, 0x21, 0x9e, 0x0e, 0xd6, 0x90, 0x1b, 0x5b, 0x4d, 0xbb, 0x63, 0x2d,
+ 0xc6, 0x27, 0x7e, 0xee, 0x8c, 0x64, 0xe6, 0xb1, 0x04, 0x7f, 0x0a, 0x0b, 0xc9, 0x49, 0x79, 0x1c,
+ 0xc9, 0x8f, 0xeb, 0x90, 0xf7, 0x0e, 0x94, 0xca, 0x6f, 0x1f, 0x0a, 0x90, 0xdb, 0xde, 0xd9, 0xdb,
+ 0x5d, 0x59, 0x6d, 0xf0, 0x1f, 0x3f, 0xac, 0xee, 0xe8, 0xfa, 0xfe, 0x6e, 0xb3, 0x92, 0x5a, 0xfe,
+ 0x6d, 0x1a, 0x52, 0x9b, 0xaf, 0xd0, 0x9f, 0x43, 0x86, 0xbf, 0x04, 0x8e, 0x78, 0xfe, 0xad, 0x8d,
+ 0x7a, 0xec, 0xc4, 0x37, 0xbe, 0xf8, 0xff, 0x5f, 0x7d, 0x95, 0xba, 0x8a, 0x2b, 0xf5, 0xe1, 0xbb,
+ 0x87, 0xc4, 0x35, 0xea, 0xa7, 0xc3, 0x3a, 0xab, 0x0f, 0xcf, 0xb4, 0xc7, 0x68, 0x1f, 0xd2, 0xbb,
+ 0x03, 0x17, 0x25, 0x3e, 0x0d, 0xd7, 0x92, 0xdf, 0x40, 0xf1, 0x1c, 0x13, 0x3c, 0x83, 0xcb, 0x8a,
+ 0xe0, 0xfe, 0xc0, 0xa5, 0x62, 0x07, 0x50, 0x50, 0x5f, 0x31, 0x2f, 0x7c, 0x33, 0xae, 0x5d, 0xfc,
+ 0x42, 0x8a, 0xef, 0x30, 0x75, 0x37, 0xf0, 0x35, 0x45, 0x1d, 0x7f, 0x6b, 0x55, 0x67, 0xd3, 0x3c,
+ 0x33, 0x51, 0xe2, 0xab, 0x72, 0x2d, 0xf9, 0xe1, 0x34, 0x76, 0x36, 0xee, 0x99, 0x49, 0xc5, 0x9a,
+ 0xe2, 0xdd, 0xb4, 0xe5, 0xa2, 0xdb, 0x31, 0xef, 0x66, 0xea, 0x0b, 0x51, 0x6d, 0x21, 0x99, 0x41,
+ 0x28, 0x5a, 0x60, 0x8a, 0x6a, 0xf8, 0xaa, 0xa2, 0xa8, 0xe5, 0xb1, 0x3d, 0xd3, 0x1e, 0x2f, 0x1f,
+ 0x43, 0x86, 0x21, 0xc4, 0xe8, 0x2f, 0xe4, 0x47, 0x2d, 0x06, 0xdb, 0x4e, 0x58, 0xfc, 0x00, 0xb6,
+ 0x8c, 0xab, 0x4c, 0x19, 0xc2, 0x25, 0xa9, 0x8c, 0x61, 0xc4, 0xcf, 0xb4, 0xc7, 0x8b, 0xda, 0xdb,
+ 0xda, 0xf2, 0x6f, 0x26, 0x21, 0xc3, 0xe0, 0x22, 0x64, 0x01, 0xf8, 0x68, 0x6a, 0x78, 0x96, 0x11,
+ 0x7c, 0x36, 0x3c, 0xcb, 0x28, 0x10, 0x8b, 0xe7, 0x99, 0xe2, 0x2a, 0x9e, 0x91, 0x8a, 0x19, 0x12,
+ 0x55, 0x67, 0xe0, 0x1a, 0xf5, 0xe9, 0x50, 0x00, 0x66, 0x3c, 0xcc, 0x50, 0x9c, 0xc0, 0x00, 0xaa,
+ 0x1a, 0xde, 0x21, 0x31, 0x88, 0x2a, 0xc6, 0x4c, 0xe7, 0x4d, 0x7c, 0x5d, 0xf1, 0x2c, 0x57, 0x6b,
+ 0x33, 0x46, 0xaa, 0xf7, 0xef, 0x34, 0x28, 0x07, 0x71, 0x51, 0x74, 0x37, 0x46, 0x72, 0x18, 0x5e,
+ 0xad, 0xdd, 0x1b, 0xcd, 0x94, 0x64, 0x01, 0x57, 0x7f, 0x4a, 0x48, 0xdf, 0xa0, 0x8c, 0xc2, 0xf1,
+ 0xe8, 0x1f, 0x34, 0x98, 0x0e, 0x81, 0x9d, 0x28, 0x4e, 0x43, 0x04, 0x4a, 0xad, 0xdd, 0xbf, 0x80,
+ 0x4b, 0x18, 0xf2, 0x80, 0x19, 0xb2, 0x80, 0x6f, 0x44, 0x5c, 0xe1, 0x76, 0x7a, 0xc4, 0xb5, 0x84,
+ 0x31, 0xde, 0x32, 0x70, 0x60, 0x32, 0x76, 0x19, 0x02, 0x40, 0x67, 0xec, 0x32, 0x04, 0x51, 0xcd,
+ 0x11, 0xcb, 0xc0, 0xd1, 0x48, 0xba, 0xc5, 0x7f, 0x97, 0x86, 0xdc, 0x2a, 0xff, 0x05, 0x22, 0x72,
+ 0x20, 0xef, 0x21, 0x80, 0x68, 0x3e, 0x0e, 0x8d, 0xf1, 0x6f, 0x0b, 0xb5, 0xdb, 0x89, 0xfd, 0x42,
+ 0xfb, 0x7d, 0xa6, 0xfd, 0x36, 0xae, 0x49, 0xed, 0xe2, 0x87, 0x8e, 0x75, 0x7e, 0xed, 0xaf, 0x1b,
+ 0xed, 0x36, 0x9d, 0xf8, 0xdf, 0x42, 0x51, 0x85, 0xe9, 0xd0, 0x9d, 0x58, 0x14, 0x48, 0x45, 0xfa,
+ 0x6a, 0x78, 0x14, 0x8b, 0xd0, 0xbe, 0xc8, 0xb4, 0x63, 0x7c, 0x2b, 0x41, 0xbb, 0xcd, 0xd8, 0x03,
+ 0x06, 0x70, 0x98, 0x2d, 0xde, 0x80, 0x00, 0x8a, 0x17, 0x6f, 0x40, 0x10, 0xa5, 0xbb, 0xd0, 0x80,
+ 0x01, 0x63, 0xa7, 0x06, 0xbc, 0x06, 0xf0, 0x41, 0x35, 0x14, 0xeb, 0x57, 0xe5, 0xea, 0x14, 0x0e,
+ 0xf9, 0x28, 0x1e, 0x17, 0xdd, 0x73, 0x21, 0xd5, 0xdd, 0x8e, 0x43, 0x43, 0x7f, 0xf9, 0x9b, 0x2c,
+ 0x14, 0x5e, 0x1a, 0x1d, 0xd3, 0x25, 0xa6, 0x61, 0xb6, 0x08, 0x3a, 0x82, 0x0c, 0x2b, 0x8d, 0xe1,
+ 0x2c, 0xa7, 0x62, 0x4d, 0xe1, 0x2c, 0x17, 0x00, 0x62, 0xf0, 0x3d, 0xa6, 0x79, 0x1e, 0xcf, 0x49,
+ 0xcd, 0x3d, 0x5f, 0x7c, 0x9d, 0x61, 0x28, 0x74, 0xc2, 0x7f, 0x09, 0x59, 0x01, 0xcf, 0x87, 0x84,
+ 0x05, 0xb0, 0x95, 0xda, 0xcd, 0xf8, 0xce, 0xa4, 0xed, 0xa5, 0xaa, 0x72, 0x18, 0x2f, 0xd5, 0xf5,
+ 0x06, 0xc0, 0x07, 0x08, 0xc3, 0xce, 0x8d, 0xe0, 0x89, 0xb5, 0x85, 0x64, 0x06, 0xa1, 0xf7, 0x11,
+ 0xd3, 0x7b, 0x17, 0xcf, 0xc7, 0xe9, 0x6d, 0x7b, 0xfc, 0x54, 0xf7, 0x21, 0x4c, 0xae, 0x1b, 0xce,
+ 0x09, 0x0a, 0x15, 0x3b, 0xe5, 0x47, 0x03, 0xb5, 0x5a, 0x5c, 0x97, 0xd0, 0x74, 0x97, 0x69, 0xba,
+ 0x85, 0xab, 0x71, 0x9a, 0x4e, 0x0c, 0x87, 0x56, 0x0f, 0x74, 0x02, 0x59, 0xfe, 0x3b, 0x82, 0xb0,
+ 0x2f, 0x03, 0xbf, 0x45, 0x08, 0xfb, 0x32, 0xf8, 0xd3, 0x83, 0xcb, 0x69, 0x72, 0x61, 0x4a, 0x3e,
+ 0xde, 0xa3, 0x5b, 0xa1, 0xa5, 0x09, 0x3e, 0xf4, 0xd7, 0xe6, 0x93, 0xba, 0x85, 0xbe, 0x87, 0x4c,
+ 0xdf, 0x1d, 0x7c, 0x33, 0x76, 0xed, 0x04, 0xf7, 0x33, 0xed, 0xf1, 0xdb, 0x1a, 0x2d, 0x13, 0xe0,
+ 0x83, 0xac, 0x91, 0xe8, 0x08, 0xe3, 0xb5, 0x91, 0xe8, 0x88, 0xe0, 0xb3, 0x78, 0x99, 0x29, 0x7f,
+ 0x8a, 0x1f, 0xc6, 0x29, 0x77, 0x6d, 0xc3, 0x74, 0x8e, 0x88, 0xfd, 0x16, 0x07, 0xd3, 0x9c, 0x93,
+ 0x4e, 0x9f, 0x46, 0xca, 0xef, 0xa7, 0x61, 0x92, 0x9e, 0x47, 0x69, 0x79, 0xf6, 0xaf, 0xf1, 0x61,
+ 0x6b, 0x22, 0xe0, 0x59, 0xd8, 0x9a, 0x28, 0x02, 0x10, 0x2d, 0xcf, 0xec, 0xb7, 0xe6, 0x84, 0x31,
+ 0x51, 0xaf, 0x3b, 0x50, 0x50, 0xee, 0xfa, 0x28, 0x46, 0x60, 0x10, 0x99, 0x0b, 0xd7, 0x85, 0x18,
+ 0xa0, 0x00, 0xdf, 0x66, 0x3a, 0xe7, 0xf0, 0x6c, 0x40, 0x67, 0x9b, 0x73, 0x51, 0xa5, 0x7f, 0x0d,
+ 0x45, 0x15, 0x13, 0x40, 0x31, 0x32, 0x43, 0xc8, 0x5f, 0x38, 0x25, 0xc6, 0x41, 0x0a, 0xd1, 0xec,
+ 0xe0, 0xfd, 0xae, 0x5e, 0xb2, 0x52, 0xe5, 0x7d, 0xc8, 0x09, 0xa0, 0x20, 0x6e, 0xb6, 0x41, 0xa8,
+ 0x30, 0x6e, 0xb6, 0x21, 0x94, 0x21, 0x7a, 0xcc, 0x63, 0x5a, 0xe9, 0x7d, 0x48, 0x96, 0x20, 0xa1,
+ 0xf1, 0x05, 0x71, 0x93, 0x34, 0xfa, 0xd8, 0x57, 0x92, 0x46, 0xe5, 0x2e, 0x3a, 0x4a, 0xe3, 0x31,
+ 0x71, 0x45, 0x2c, 0xc9, 0x7b, 0x1e, 0x4a, 0x10, 0xa8, 0xa6, 0x7c, 0x3c, 0x8a, 0x25, 0xe9, 0x54,
+ 0xee, 0x2b, 0x15, 0xf9, 0x1e, 0x7d, 0x0e, 0xe0, 0x43, 0x1a, 0xe1, 0xd3, 0x56, 0x2c, 0x2e, 0x1a,
+ 0x3e, 0x6d, 0xc5, 0xa3, 0x22, 0xd1, 0xfc, 0xe1, 0xeb, 0xe6, 0x17, 0x03, 0xaa, 0xfd, 0x5f, 0x34,
+ 0x40, 0x51, 0x04, 0x04, 0x3d, 0x89, 0xd7, 0x10, 0x8b, 0xb8, 0xd6, 0x9e, 0x5e, 0x8e, 0x39, 0xa9,
+ 0x44, 0xf8, 0x66, 0xb5, 0xd8, 0x88, 0xfe, 0x6b, 0x6a, 0xd8, 0x97, 0x1a, 0x94, 0x02, 0x10, 0x0a,
+ 0x7a, 0x90, 0xb0, 0xc6, 0x21, 0xd0, 0xb6, 0xf6, 0xf0, 0x42, 0xbe, 0xa4, 0x93, 0x98, 0xb2, 0x23,
+ 0xe4, 0x41, 0xfc, 0x1f, 0x35, 0x28, 0x07, 0x61, 0x17, 0x94, 0x20, 0x3f, 0x02, 0xfc, 0xd6, 0x16,
+ 0x2f, 0x66, 0xbc, 0x78, 0xa9, 0xfc, 0xb3, 0x79, 0x1f, 0x72, 0x02, 0xac, 0x89, 0x0b, 0x88, 0x20,
+ 0x6c, 0x1c, 0x17, 0x10, 0x21, 0xa4, 0x27, 0x21, 0x20, 0x6c, 0xab, 0x4b, 0x94, 0x10, 0x14, 0x88,
+ 0x4e, 0x92, 0xc6, 0xd1, 0x21, 0x18, 0x82, 0x83, 0x46, 0x69, 0xf4, 0x43, 0x50, 0xc2, 0x39, 0x28,
+ 0x41, 0xe0, 0x05, 0x21, 0x18, 0x46, 0x83, 0x12, 0x42, 0x90, 0x29, 0x55, 0x42, 0xd0, 0x07, 0x5f,
+ 0xe2, 0x42, 0x30, 0x82, 0x88, 0xc7, 0x85, 0x60, 0x14, 0xbf, 0x49, 0x58, 0x57, 0xa6, 0x3b, 0x10,
+ 0x82, 0x33, 0x31, 0x58, 0x0d, 0x7a, 0x9a, 0xe0, 0xd0, 0x58, 0xb0, 0xbd, 0xf6, 0xd6, 0x25, 0xb9,
+ 0x47, 0xee, 0x7d, 0xbe, 0x14, 0x72, 0xef, 0x7f, 0xad, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74,
+ 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0xcb, 0xb2, 0x5f, 0xec, 0x35, 0x2f, 0x1a, 0x9e, 0x57, 0xfe, 0xfb,
+ 0xbb, 0x79, 0xed, 0xff, 0xbe, 0x9b, 0xd7, 0x7e, 0xf1, 0xdd, 0xbc, 0xf6, 0xaf, 0xbf, 0x9c, 0x9f,
+ 0x38, 0xcc, 0xb2, 0xff, 0xe1, 0xf5, 0xee, 0x1f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x74, 0x55, 0x61,
+ 0xe6, 0x68, 0x36, 0x00, 0x00,
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
deleted file mode 100644
index 4f0b157..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "bytes"
- "context"
- "crypto/sha1"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "path"
- "sort"
- "strings"
- "sync"
- "time"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/netutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/store"
- "github.com/coreos/etcd/version"
-
- "github.com/coreos/go-semver/semver"
-)
-
-// RaftCluster is a list of Members that belong to the same raft cluster
-type RaftCluster struct {
- id types.ID
- token string
-
- store store.Store
- be backend.Backend
-
- sync.Mutex // guards the fields below
- version *semver.Version
- members map[types.ID]*Member
- // removed contains the ids of removed members in the cluster.
- // removed id cannot be reused.
- removed map[types.ID]bool
-}
-
-func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) {
- c := NewCluster(token)
- for name, urls := range urlsmap {
- m := NewMember(name, urls, token, nil)
- if _, ok := c.members[m.ID]; ok {
- return nil, fmt.Errorf("member exists with identical ID %v", m)
- }
- if uint64(m.ID) == raft.None {
- return nil, fmt.Errorf("cannot use %x as member id", raft.None)
- }
- c.members[m.ID] = m
- }
- c.genID()
- return c, nil
-}
-
-func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster {
- c := NewCluster(token)
- c.id = id
- for _, m := range membs {
- c.members[m.ID] = m
- }
- return c
-}
-
-func NewCluster(token string) *RaftCluster {
- return &RaftCluster{
- token: token,
- members: make(map[types.ID]*Member),
- removed: make(map[types.ID]bool),
- }
-}
-
-func (c *RaftCluster) ID() types.ID { return c.id }
-
-func (c *RaftCluster) Members() []*Member {
- c.Lock()
- defer c.Unlock()
- var ms MembersByID
- for _, m := range c.members {
- ms = append(ms, m.Clone())
- }
- sort.Sort(ms)
- return []*Member(ms)
-}
-
-func (c *RaftCluster) Member(id types.ID) *Member {
- c.Lock()
- defer c.Unlock()
- return c.members[id].Clone()
-}
-
-// MemberByName returns a Member with the given name if exists.
-// If more than one member has the given name, it will panic.
-func (c *RaftCluster) MemberByName(name string) *Member {
- c.Lock()
- defer c.Unlock()
- var memb *Member
- for _, m := range c.members {
- if m.Name == name {
- if memb != nil {
- plog.Panicf("two members with the given name %q exist", name)
- }
- memb = m
- }
- }
- return memb.Clone()
-}
-
-func (c *RaftCluster) MemberIDs() []types.ID {
- c.Lock()
- defer c.Unlock()
- var ids []types.ID
- for _, m := range c.members {
- ids = append(ids, m.ID)
- }
- sort.Sort(types.IDSlice(ids))
- return ids
-}
-
-func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
- c.Lock()
- defer c.Unlock()
- return c.removed[id]
-}
-
-// PeerURLs returns a list of all peer addresses.
-// The returned list is sorted in ascending lexicographical order.
-func (c *RaftCluster) PeerURLs() []string {
- c.Lock()
- defer c.Unlock()
- urls := make([]string, 0)
- for _, p := range c.members {
- urls = append(urls, p.PeerURLs...)
- }
- sort.Strings(urls)
- return urls
-}
-
-// ClientURLs returns a list of all client addresses.
-// The returned list is sorted in ascending lexicographical order.
-func (c *RaftCluster) ClientURLs() []string {
- c.Lock()
- defer c.Unlock()
- urls := make([]string, 0)
- for _, p := range c.members {
- urls = append(urls, p.ClientURLs...)
- }
- sort.Strings(urls)
- return urls
-}
-
-func (c *RaftCluster) String() string {
- c.Lock()
- defer c.Unlock()
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "{ClusterID:%s ", c.id)
- var ms []string
- for _, m := range c.members {
- ms = append(ms, fmt.Sprintf("%+v", m))
- }
- fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
- var ids []string
- for id := range c.removed {
- ids = append(ids, id.String())
- }
- fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
- return b.String()
-}
-
-func (c *RaftCluster) genID() {
- mIDs := c.MemberIDs()
- b := make([]byte, 8*len(mIDs))
- for i, id := range mIDs {
- binary.BigEndian.PutUint64(b[8*i:], uint64(id))
- }
- hash := sha1.Sum(b)
- c.id = types.ID(binary.BigEndian.Uint64(hash[:8]))
-}
-
-func (c *RaftCluster) SetID(id types.ID) { c.id = id }
-
-func (c *RaftCluster) SetStore(st store.Store) { c.store = st }
-
-func (c *RaftCluster) SetBackend(be backend.Backend) {
- c.be = be
- mustCreateBackendBuckets(c.be)
-}
-
-func (c *RaftCluster) Recover(onSet func(*semver.Version)) {
- c.Lock()
- defer c.Unlock()
-
- c.members, c.removed = membersFromStore(c.store)
- c.version = clusterVersionFromStore(c.store)
- mustDetectDowngrade(c.version)
- onSet(c.version)
-
- for _, m := range c.members {
- plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
- }
- if c.version != nil {
- plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
- }
-}
-
-// ValidateConfigurationChange takes a proposed ConfChange and
-// ensures that it is still valid.
-func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
- members, removed := membersFromStore(c.store)
- id := types.ID(cc.NodeID)
- if removed[id] {
- return ErrIDRemoved
- }
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- if members[id] != nil {
- return ErrIDExists
- }
- urls := make(map[string]bool)
- for _, m := range members {
- for _, u := range m.PeerURLs {
- urls[u] = true
- }
- }
- m := new(Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- for _, u := range m.PeerURLs {
- if urls[u] {
- return ErrPeerURLexists
- }
- }
- case raftpb.ConfChangeRemoveNode:
- if members[id] == nil {
- return ErrIDNotFound
- }
- case raftpb.ConfChangeUpdateNode:
- if members[id] == nil {
- return ErrIDNotFound
- }
- urls := make(map[string]bool)
- for _, m := range members {
- if m.ID == id {
- continue
- }
- for _, u := range m.PeerURLs {
- urls[u] = true
- }
- }
- m := new(Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- for _, u := range m.PeerURLs {
- if urls[u] {
- return ErrPeerURLexists
- }
- }
- default:
- plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
- }
- return nil
-}
-
-// AddMember adds a new Member into the cluster, and saves the given member's
-// raftAttributes into the store. The given member should have empty attributes.
-// A Member with a matching id must not exist.
-func (c *RaftCluster) AddMember(m *Member) {
- c.Lock()
- defer c.Unlock()
- if c.store != nil {
- mustSaveMemberToStore(c.store, m)
- }
- if c.be != nil {
- mustSaveMemberToBackend(c.be, m)
- }
-
- c.members[m.ID] = m
-
- plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
-}
-
-// RemoveMember removes a member from the store.
-// The given id MUST exist, or the function panics.
-func (c *RaftCluster) RemoveMember(id types.ID) {
- c.Lock()
- defer c.Unlock()
- if c.store != nil {
- mustDeleteMemberFromStore(c.store, id)
- }
- if c.be != nil {
- mustDeleteMemberFromBackend(c.be, id)
- }
-
- delete(c.members, id)
- c.removed[id] = true
-
- plog.Infof("removed member %s from cluster %s", id, c.id)
-}
-
-func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
- c.Lock()
- defer c.Unlock()
- if m, ok := c.members[id]; ok {
- m.Attributes = attr
- if c.store != nil {
- mustUpdateMemberAttrInStore(c.store, m)
- }
- if c.be != nil {
- mustSaveMemberToBackend(c.be, m)
- }
- return
- }
- _, ok := c.removed[id]
- if !ok {
- plog.Panicf("error updating attributes of unknown member %s", id)
- }
- plog.Warningf("skipped updating attributes of removed member %s", id)
-}
-
-func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
- c.Lock()
- defer c.Unlock()
-
- c.members[id].RaftAttributes = raftAttr
- if c.store != nil {
- mustUpdateMemberInStore(c.store, c.members[id])
- }
- if c.be != nil {
- mustSaveMemberToBackend(c.be, c.members[id])
- }
-
- plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
-}
-
-func (c *RaftCluster) Version() *semver.Version {
- c.Lock()
- defer c.Unlock()
- if c.version == nil {
- return nil
- }
- return semver.Must(semver.NewVersion(c.version.String()))
-}
-
-func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) {
- c.Lock()
- defer c.Unlock()
- if c.version != nil {
- plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String()))
- } else {
- plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
- }
- c.version = ver
- mustDetectDowngrade(c.version)
- if c.store != nil {
- mustSaveClusterVersionToStore(c.store, ver)
- }
- if c.be != nil {
- mustSaveClusterVersionToBackend(c.be, ver)
- }
- onSet(ver)
-}
-
-func (c *RaftCluster) IsReadyToAddNewMember() bool {
- nmembers := 1
- nstarted := 0
-
- for _, member := range c.members {
- if member.IsStarted() {
- nstarted++
- }
- nmembers++
- }
-
- if nstarted == 1 && nmembers == 2 {
- // a case of adding a new node to 1-member cluster for restoring cluster data
- // https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
-
- plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
- return true
- }
-
- nquorum := nmembers/2 + 1
- if nstarted < nquorum {
- plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
- return false
- }
-
- return true
-}
-
-func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool {
- nmembers := 0
- nstarted := 0
-
- for _, member := range c.members {
- if uint64(member.ID) == id {
- continue
- }
-
- if member.IsStarted() {
- nstarted++
- }
- nmembers++
- }
-
- nquorum := nmembers/2 + 1
- if nstarted < nquorum {
- plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
- return false
- }
-
- return true
-}
-
-func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) {
- members := make(map[types.ID]*Member)
- removed := make(map[types.ID]bool)
- e, err := st.Get(StoreMembersPrefix, true, true)
- if err != nil {
- if isKeyNotFound(err) {
- return members, removed
- }
- plog.Panicf("get storeMembers should never fail: %v", err)
- }
- for _, n := range e.Node.Nodes {
- var m *Member
- m, err = nodeToMember(n)
- if err != nil {
- plog.Panicf("nodeToMember should never fail: %v", err)
- }
- members[m.ID] = m
- }
-
- e, err = st.Get(storeRemovedMembersPrefix, true, true)
- if err != nil {
- if isKeyNotFound(err) {
- return members, removed
- }
- plog.Panicf("get storeRemovedMembers should never fail: %v", err)
- }
- for _, n := range e.Node.Nodes {
- removed[MustParseMemberIDFromKey(n.Key)] = true
- }
- return members, removed
-}
-
-func clusterVersionFromStore(st store.Store) *semver.Version {
- e, err := st.Get(path.Join(storePrefix, "version"), false, false)
- if err != nil {
- if isKeyNotFound(err) {
- return nil
- }
- plog.Panicf("unexpected error (%v) when getting cluster version from store", err)
- }
- return semver.Must(semver.NewVersion(*e.Node.Value))
-}
-
-// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs
-// with the existing cluster. If the validation succeeds, it assigns the IDs
-// from the existing cluster to the local cluster.
-// If the validation fails, an error will be returned.
-func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) error {
- ems := existing.Members()
- lms := local.Members()
- if len(ems) != len(lms) {
- return fmt.Errorf("member count is unequal")
- }
- sort.Sort(MembersByPeerURLs(ems))
- sort.Sort(MembersByPeerURLs(lms))
-
- ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
- defer cancel()
- for i := range ems {
- if ok, err := netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
- return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
- }
- lms[i].ID = ems[i].ID
- }
- local.members = make(map[types.ID]*Member)
- for _, m := range lms {
- local.members[m.ID] = m
- }
- return nil
-}
-
-func mustDetectDowngrade(cv *semver.Version) {
- lv := semver.Must(semver.NewVersion(version.Version))
- // only keep major.minor version for comparison against cluster version
- lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
- if cv != nil && lv.LessThan(*cv) {
- plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String()))
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go
deleted file mode 100644
index b07fb2d..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package membership describes individual etcd members and clusters of members.
-package membership
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go
deleted file mode 100644
index e4d36af..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "errors"
-
- etcdErr "github.com/coreos/etcd/error"
-)
-
-var (
- ErrIDRemoved = errors.New("membership: ID removed")
- ErrIDExists = errors.New("membership: ID exists")
- ErrIDNotFound = errors.New("membership: ID not found")
- ErrPeerURLexists = errors.New("membership: peerURL exists")
-)
-
-func isKeyNotFound(err error) bool {
- e, ok := err.(*etcdErr.Error)
- return ok && e.ErrorCode == etcdErr.EcodeKeyNotFound
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go
deleted file mode 100644
index 6de74d2..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "crypto/sha1"
- "encoding/binary"
- "fmt"
- "math/rand"
- "sort"
- "time"
-
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership")
-)
-
-// RaftAttributes represents the raft related attributes of an etcd member.
-type RaftAttributes struct {
- // PeerURLs is the list of peers in the raft cluster.
- // TODO(philips): ensure these are URLs
- PeerURLs []string `json:"peerURLs"`
-}
-
-// Attributes represents all the non-raft related attributes of an etcd member.
-type Attributes struct {
- Name string `json:"name,omitempty"`
- ClientURLs []string `json:"clientURLs,omitempty"`
-}
-
-type Member struct {
- ID types.ID `json:"id"`
- RaftAttributes
- Attributes
-}
-
-// NewMember creates a Member without an ID and generates one based on the
-// cluster name, peer URLs, and time. This is used for bootstrapping/adding new member.
-func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
- m := &Member{
- RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()},
- Attributes: Attributes{Name: name},
- }
-
- var b []byte
- sort.Strings(m.PeerURLs)
- for _, p := range m.PeerURLs {
- b = append(b, []byte(p)...)
- }
-
- b = append(b, []byte(clusterName)...)
- if now != nil {
- b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
- }
-
- hash := sha1.Sum(b)
- m.ID = types.ID(binary.BigEndian.Uint64(hash[:8]))
- return m
-}
-
-// PickPeerURL chooses a random address from a given Member's PeerURLs.
-// It will panic if there is no PeerURLs available in Member.
-func (m *Member) PickPeerURL() string {
- if len(m.PeerURLs) == 0 {
- plog.Panicf("member should always have some peer url")
- }
- return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
-}
-
-func (m *Member) Clone() *Member {
- if m == nil {
- return nil
- }
- mm := &Member{
- ID: m.ID,
- Attributes: Attributes{
- Name: m.Name,
- },
- }
- if m.PeerURLs != nil {
- mm.PeerURLs = make([]string, len(m.PeerURLs))
- copy(mm.PeerURLs, m.PeerURLs)
- }
- if m.ClientURLs != nil {
- mm.ClientURLs = make([]string, len(m.ClientURLs))
- copy(mm.ClientURLs, m.ClientURLs)
- }
- return mm
-}
-
-func (m *Member) IsStarted() bool {
- return len(m.Name) != 0
-}
-
-// MembersByID implements sort by ID interface
-type MembersByID []*Member
-
-func (ms MembersByID) Len() int { return len(ms) }
-func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
-func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
-
-// MembersByPeerURLs implements sort by peer urls interface
-type MembersByPeerURLs []*Member
-
-func (ms MembersByPeerURLs) Len() int { return len(ms) }
-func (ms MembersByPeerURLs) Less(i, j int) bool {
- return ms[i].PeerURLs[0] < ms[j].PeerURLs[0]
-}
-func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go
deleted file mode 100644
index d3f8f24..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "encoding/json"
- "fmt"
- "path"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/store"
-
- "github.com/coreos/go-semver/semver"
-)
-
-const (
- attributesSuffix = "attributes"
- raftAttributesSuffix = "raftAttributes"
-
- // the prefix for stroing membership related information in store provided by store pkg.
- storePrefix = "/0"
-)
-
-var (
- membersBucketName = []byte("members")
- membersRemovedBucketName = []byte("members_removed")
- clusterBucketName = []byte("cluster")
-
- StoreMembersPrefix = path.Join(storePrefix, "members")
- storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
-)
-
-func mustSaveMemberToBackend(be backend.Backend, m *Member) {
- mkey := backendMemberKey(m.ID)
- mvalue, err := json.Marshal(m)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
-
- tx := be.BatchTx()
- tx.Lock()
- tx.UnsafePut(membersBucketName, mkey, mvalue)
- tx.Unlock()
-}
-
-func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
- mkey := backendMemberKey(id)
-
- tx := be.BatchTx()
- tx.Lock()
- tx.UnsafeDelete(membersBucketName, mkey)
- tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed"))
- tx.Unlock()
-}
-
-func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
- ckey := backendClusterVersionKey()
-
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String()))
-}
-
-func mustSaveMemberToStore(s store.Store, m *Member) {
- b, err := json.Marshal(m.RaftAttributes)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
- p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
- if _, err := s.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("create raftAttributes should never fail: %v", err)
- }
-}
-
-func mustDeleteMemberFromStore(s store.Store, id types.ID) {
- if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
- plog.Panicf("delete member should never fail: %v", err)
- }
- if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("create removedMember should never fail: %v", err)
- }
-}
-
-func mustUpdateMemberInStore(s store.Store, m *Member) {
- b, err := json.Marshal(m.RaftAttributes)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
- p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
- if _, err := s.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("update raftAttributes should never fail: %v", err)
- }
-}
-
-func mustUpdateMemberAttrInStore(s store.Store, m *Member) {
- b, err := json.Marshal(m.Attributes)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
- p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
- if _, err := s.Set(p, false, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("update raftAttributes should never fail: %v", err)
- }
-}
-
-func mustSaveClusterVersionToStore(s store.Store, ver *semver.Version) {
- if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("save cluster version should never fail: %v", err)
- }
-}
-
-// nodeToMember builds member from a key value node.
-// the child nodes of the given node MUST be sorted by key.
-func nodeToMember(n *store.NodeExtern) (*Member, error) {
- m := &Member{ID: MustParseMemberIDFromKey(n.Key)}
- attrs := make(map[string][]byte)
- raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
- attrKey := path.Join(n.Key, attributesSuffix)
- for _, nn := range n.Nodes {
- if nn.Key != raftAttrKey && nn.Key != attrKey {
- return nil, fmt.Errorf("unknown key %q", nn.Key)
- }
- attrs[nn.Key] = []byte(*nn.Value)
- }
- if data := attrs[raftAttrKey]; data != nil {
- if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
- return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
- }
- } else {
- return nil, fmt.Errorf("raftAttributes key doesn't exist")
- }
- if data := attrs[attrKey]; data != nil {
- if err := json.Unmarshal(data, &m.Attributes); err != nil {
- return m, fmt.Errorf("unmarshal attributes error: %v", err)
- }
- }
- return m, nil
-}
-
-func backendMemberKey(id types.ID) []byte {
- return []byte(id.String())
-}
-
-func backendClusterVersionKey() []byte {
- return []byte("clusterVersion")
-}
-
-func mustCreateBackendBuckets(be backend.Backend) {
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- tx.UnsafeCreateBucket(membersBucketName)
- tx.UnsafeCreateBucket(membersRemovedBucketName)
- tx.UnsafeCreateBucket(clusterBucketName)
-}
-
-func MemberStoreKey(id types.ID) string {
- return path.Join(StoreMembersPrefix, id.String())
-}
-
-func StoreClusterVersionKey() string {
- return path.Join(storePrefix, "version")
-}
-
-func MemberAttributesStorePath(id types.ID) string {
- return path.Join(MemberStoreKey(id), attributesSuffix)
-}
-
-func MustParseMemberIDFromKey(key string) types.ID {
- id, err := types.IDFromString(path.Base(key))
- if err != nil {
- plog.Panicf("unexpected parse member id error: %v", err)
- }
- return id
-}
-
-func RemovedMemberStoreKey(id types.ID) string {
- return path.Join(storeRemovedMembersPrefix, id.String())
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go
deleted file mode 100644
index 10f8a47..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- goruntime "runtime"
- "time"
-
- "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/version"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "has_leader",
- Help: "Whether or not a leader exists. 1 is existence, 0 is not.",
- })
- isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "is_leader",
- Help: "Whether or not this member is a leader. 1 if is, 0 otherwise.",
- })
- leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "leader_changes_seen_total",
- Help: "The number of leader changes seen.",
- })
- heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "heartbeat_send_failures_total",
- Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
- })
- slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "slow_apply_total",
- Help: "The total number of slow apply requests (likely overloaded from slow disk).",
- })
- proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_committed_total",
- Help: "The total number of consensus proposals committed.",
- })
- proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_applied_total",
- Help: "The total number of consensus proposals applied.",
- })
- proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_pending",
- Help: "The current number of pending proposals to commit.",
- })
- proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_failed_total",
- Help: "The total number of failed proposals seen.",
- })
- leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "server",
- Name: "lease_expired_total",
- Help: "The total number of expired leases.",
- })
- slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "slow_read_indexes_total",
- Help: "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
- })
- readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "read_indexes_failed_total",
- Help: "The total number of failed read indexes seen.",
- })
- quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "quota_backend_bytes",
- Help: "Current backend storage quota size in bytes.",
- })
- currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "version",
- Help: "Which version is running. 1 for 'server_version' label with current version.",
- },
- []string{"server_version"})
- currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "go_version",
- Help: "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
- },
- []string{"server_go_version"})
- serverID = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "id",
- Help: "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
- },
- []string{"server_id"})
-)
-
-func init() {
- prometheus.MustRegister(hasLeader)
- prometheus.MustRegister(isLeader)
- prometheus.MustRegister(leaderChanges)
- prometheus.MustRegister(heartbeatSendFailures)
- prometheus.MustRegister(slowApplies)
- prometheus.MustRegister(proposalsCommitted)
- prometheus.MustRegister(proposalsApplied)
- prometheus.MustRegister(proposalsPending)
- prometheus.MustRegister(proposalsFailed)
- prometheus.MustRegister(leaseExpired)
- prometheus.MustRegister(slowReadIndex)
- prometheus.MustRegister(readIndexFailed)
- prometheus.MustRegister(quotaBackendBytes)
- prometheus.MustRegister(currentVersion)
- prometheus.MustRegister(currentGoVersion)
- prometheus.MustRegister(serverID)
-
- currentVersion.With(prometheus.Labels{
- "server_version": version.Version,
- }).Set(1)
- currentGoVersion.With(prometheus.Labels{
- "server_go_version": goruntime.Version(),
- }).Set(1)
-}
-
-func monitorFileDescriptor(done <-chan struct{}) {
- ticker := time.NewTicker(5 * time.Second)
- defer ticker.Stop()
- for {
- used, err := runtime.FDUsage()
- if err != nil {
- plog.Errorf("cannot monitor file descriptor usage (%v)", err)
- return
- }
- limit, err := runtime.FDLimit()
- if err != nil {
- plog.Errorf("cannot monitor file descriptor usage (%v)", err)
- return
- }
- if used >= limit/5*4 {
- plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
- }
- select {
- case <-ticker.C:
- case <-done:
- return
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go
deleted file mode 100644
index 882eb76..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/quota.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-const (
- // DefaultQuotaBytes is the number of bytes the backend Size may
- // consume before exceeding the space quota.
- DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
- // MaxQuotaBytes is the maximum number of bytes suggested for a backend
- // quota. A larger quota may lead to degraded performance.
- MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
-)
-
-// Quota represents an arbitrary quota against arbitrary requests. Each request
-// costs some charge; if there is not enough remaining charge, then there are
-// too few resources available within the quota to apply the request.
-type Quota interface {
- // Available judges whether the given request fits within the quota.
- Available(req interface{}) bool
- // Cost computes the charge against the quota for a given request.
- Cost(req interface{}) int
- // Remaining is the amount of charge left for the quota.
- Remaining() int64
-}
-
-type passthroughQuota struct{}
-
-func (*passthroughQuota) Available(interface{}) bool { return true }
-func (*passthroughQuota) Cost(interface{}) int { return 0 }
-func (*passthroughQuota) Remaining() int64 { return 1 }
-
-type backendQuota struct {
- s *EtcdServer
- maxBackendBytes int64
-}
-
-const (
- // leaseOverhead is an estimate for the cost of storing a lease
- leaseOverhead = 64
- // kvOverhead is an estimate for the cost of storing a key's metadata
- kvOverhead = 256
-)
-
-func NewBackendQuota(s *EtcdServer) Quota {
- quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))
-
- if s.Cfg.QuotaBackendBytes < 0 {
- // disable quotas if negative
- plog.Warningf("disabling backend quota")
- return &passthroughQuota{}
- }
-
- if s.Cfg.QuotaBackendBytes == 0 {
- // use default size if no quota size given
- quotaBackendBytes.Set(float64(DefaultQuotaBytes))
- return &backendQuota{s, DefaultQuotaBytes}
- }
-
- if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
- plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
- }
- return &backendQuota{s, s.Cfg.QuotaBackendBytes}
-}
-
-func (b *backendQuota) Available(v interface{}) bool {
- // TODO: maybe optimize backend.Size()
- return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
-}
-
-func (b *backendQuota) Cost(v interface{}) int {
- switch r := v.(type) {
- case *pb.PutRequest:
- return costPut(r)
- case *pb.TxnRequest:
- return costTxn(r)
- case *pb.LeaseGrantRequest:
- return leaseOverhead
- default:
- panic("unexpected cost")
- }
-}
-
-func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
-
-func costTxnReq(u *pb.RequestOp) int {
- r := u.GetRequestPut()
- if r == nil {
- return 0
- }
- return costPut(r)
-}
-
-func costTxn(r *pb.TxnRequest) int {
- sizeSuccess := 0
- for _, u := range r.Success {
- sizeSuccess += costTxnReq(u)
- }
- sizeFailure := 0
- for _, u := range r.Failure {
- sizeFailure += costTxnReq(u)
- }
- if sizeFailure > sizeSuccess {
- return sizeFailure
- }
- return sizeSuccess
-}
-
-func (b *backendQuota) Remaining() int64 {
- return b.maxBackendBytes - b.s.Backend().Size()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go
deleted file mode 100644
index 1080633..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/raft.go
+++ /dev/null
@@ -1,608 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "encoding/json"
- "expvar"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/contention"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/rafthttp"
- "github.com/coreos/etcd/wal"
- "github.com/coreos/etcd/wal/walpb"
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
-	// Number of entries for a slow follower to catch up after compacting
-	// the raft storage entries.
-	// We expect the follower to have millisecond-level latency with the leader.
-	// The max throughput is around 10K. Keeping 5K entries is enough to help a
-	// slow follower catch up.
- numberOfCatchUpEntries = 5000
-
- // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
- // Assuming the RTT is around 10ms, 1MB max size is large enough.
- maxSizePerMsg = 1 * 1024 * 1024
- // Never overflow the rafthttp buffer, which is 4096.
- // TODO: a better const?
- maxInflightMsgs = 4096 / 8
-)
-
-var (
- // protects raftStatus
- raftStatusMu sync.Mutex
- // indirection for expvar func interface
- // expvar panics when publishing duplicate name
- // expvar does not support remove a registered name
- // so only register a func that calls raftStatus
- // and change raftStatus as we need.
- raftStatus func() raft.Status
-)
-
-func init() {
- raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
- expvar.Publish("raft.status", expvar.Func(func() interface{} {
- raftStatusMu.Lock()
- defer raftStatusMu.Unlock()
- return raftStatus()
- }))
-}
-
-type RaftTimer interface {
- Index() uint64
- Term() uint64
-}
-
-// apply contains entries, snapshot to be applied. Once
-// an apply is consumed, the entries will be persisted to
-// raft storage concurrently; the application must read
-// raftDone before assuming the raft messages are stable.
-type apply struct {
- entries []raftpb.Entry
- snapshot raftpb.Snapshot
- // notifyc synchronizes etcd server applies with the raft node
- notifyc chan struct{}
-}
-
-type raftNode struct {
- // Cache of the latest raft index and raft term the server has seen.
-	// These three uint64 fields must be the first elements to keep 64-bit
- // alignment for atomic access to the fields.
- index uint64
- term uint64
- lead uint64
-
- tickMu *sync.Mutex
- raftNodeConfig
-
- // a chan to send/receive snapshot
- msgSnapC chan raftpb.Message
-
- // a chan to send out apply
- applyc chan apply
-
- // a chan to send out readState
- readStateC chan raft.ReadState
-
- // utility
- ticker *time.Ticker
- // contention detectors for raft heartbeat message
- td *contention.TimeoutDetector
-
- stopped chan struct{}
- done chan struct{}
-}
-
-type raftNodeConfig struct {
- // to check if msg receiver is removed from cluster
- isIDRemoved func(id uint64) bool
- raft.Node
- raftStorage *raft.MemoryStorage
- storage Storage
- heartbeat time.Duration // for logging
- // transport specifies the transport to send and receive msgs to members.
- // Sending messages MUST NOT block. It is okay to drop messages, since
- // clients should timeout and reissue their messages.
- // If transport is nil, server will panic.
- transport rafthttp.Transporter
-}
-
-func newRaftNode(cfg raftNodeConfig) *raftNode {
- r := &raftNode{
- tickMu: new(sync.Mutex),
- raftNodeConfig: cfg,
- // set up contention detectors for raft heartbeat message.
- // expect to send a heartbeat within 2 heartbeat intervals.
- td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
- readStateC: make(chan raft.ReadState, 1),
- msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
- applyc: make(chan apply),
- stopped: make(chan struct{}),
- done: make(chan struct{}),
- }
- if r.heartbeat == 0 {
- r.ticker = &time.Ticker{}
- } else {
- r.ticker = time.NewTicker(r.heartbeat)
- }
- return r
-}
-
-// raft.Node does not have locks in Raft package
-func (r *raftNode) tick() {
- r.tickMu.Lock()
- r.Tick()
- r.tickMu.Unlock()
-}
-
-// start prepares and starts raftNode in a new goroutine. It is no longer safe
-// to modify the fields after it has been started.
-func (r *raftNode) start(rh *raftReadyHandler) {
- internalTimeout := time.Second
-
- go func() {
- defer r.onStop()
- islead := false
-
- for {
- select {
- case <-r.ticker.C:
- r.tick()
- case rd := <-r.Ready():
- if rd.SoftState != nil {
- newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
- if newLeader {
- leaderChanges.Inc()
- }
-
- if rd.SoftState.Lead == raft.None {
- hasLeader.Set(0)
- } else {
- hasLeader.Set(1)
- }
-
- atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
- islead = rd.RaftState == raft.StateLeader
- if islead {
- isLeader.Set(1)
- } else {
- isLeader.Set(0)
- }
- rh.updateLeadership(newLeader)
- r.td.Reset()
- }
-
- if len(rd.ReadStates) != 0 {
- select {
- case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
- case <-time.After(internalTimeout):
- plog.Warningf("timed out sending read state")
- case <-r.stopped:
- return
- }
- }
-
- notifyc := make(chan struct{}, 1)
- ap := apply{
- entries: rd.CommittedEntries,
- snapshot: rd.Snapshot,
- notifyc: notifyc,
- }
-
- updateCommittedIndex(&ap, rh)
-
- select {
- case r.applyc <- ap:
- case <-r.stopped:
- return
- }
-
- // the leader can write to its disk in parallel with replicating to the followers and them
- // writing to their disks.
- // For more details, check raft thesis 10.2.1
- if islead {
- // gofail: var raftBeforeLeaderSend struct{}
- r.transport.Send(r.processMessages(rd.Messages))
- }
-
- // gofail: var raftBeforeSave struct{}
- if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
- plog.Fatalf("raft save state and entries error: %v", err)
- }
- if !raft.IsEmptyHardState(rd.HardState) {
- proposalsCommitted.Set(float64(rd.HardState.Commit))
- }
- // gofail: var raftAfterSave struct{}
-
- if !raft.IsEmptySnap(rd.Snapshot) {
- // gofail: var raftBeforeSaveSnap struct{}
- if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
- plog.Fatalf("raft save snapshot error: %v", err)
- }
-					// etcdserver now claims the snapshot has been persisted onto the disk
- notifyc <- struct{}{}
-
- // gofail: var raftAfterSaveSnap struct{}
- r.raftStorage.ApplySnapshot(rd.Snapshot)
- plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
- // gofail: var raftAfterApplySnap struct{}
- }
-
- r.raftStorage.Append(rd.Entries)
-
- if !islead {
- // finish processing incoming messages before we signal raftdone chan
- msgs := r.processMessages(rd.Messages)
-
- // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
- notifyc <- struct{}{}
-
- // Candidate or follower needs to wait for all pending configuration
- // changes to be applied before sending messages.
- // Otherwise we might incorrectly count votes (e.g. votes from removed members).
- // Also slow machine's follower raft-layer could proceed to become the leader
- // on its own single-node cluster, before apply-layer applies the config change.
- // We simply wait for ALL pending entries to be applied for now.
- // We might improve this later on if it causes unnecessary long blocking issues.
- waitApply := false
- for _, ent := range rd.CommittedEntries {
- if ent.Type == raftpb.EntryConfChange {
- waitApply = true
- break
- }
- }
- if waitApply {
- // blocks until 'applyAll' calls 'applyWait.Trigger'
- // to be in sync with scheduled config-change job
- // (assume notifyc has cap of 1)
- select {
- case notifyc <- struct{}{}:
- case <-r.stopped:
- return
- }
- }
-
- // gofail: var raftBeforeFollowerSend struct{}
- r.transport.Send(msgs)
- } else {
- // leader already processed 'MsgSnap' and signaled
- notifyc <- struct{}{}
- }
-
- r.Advance()
- case <-r.stopped:
- return
- }
- }
- }()
-}
-
-func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
- var ci uint64
- if len(ap.entries) != 0 {
- ci = ap.entries[len(ap.entries)-1].Index
- }
- if ap.snapshot.Metadata.Index > ci {
- ci = ap.snapshot.Metadata.Index
- }
- if ci != 0 {
- rh.updateCommittedIndex(ci)
- }
-}
-
-func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
- sentAppResp := false
- for i := len(ms) - 1; i >= 0; i-- {
- if r.isIDRemoved(ms[i].To) {
- ms[i].To = 0
- }
-
- if ms[i].Type == raftpb.MsgAppResp {
- if sentAppResp {
- ms[i].To = 0
- } else {
- sentAppResp = true
- }
- }
-
- if ms[i].Type == raftpb.MsgSnap {
-			// There are two separate data stores: the store for v2, and the KV for v3.
- // The msgSnap only contains the most recent snapshot of store without KV.
- // So we need to redirect the msgSnap to etcd server main loop for merging in the
- // current store snapshot and KV snapshot.
- select {
- case r.msgSnapC <- ms[i]:
- default:
-				// drop msgSnap if the inflight chan is full.
- }
- ms[i].To = 0
- }
- if ms[i].Type == raftpb.MsgHeartbeat {
- ok, exceed := r.td.Observe(ms[i].To)
- if !ok {
- // TODO: limit request rate.
- plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
- plog.Warningf("server is likely overloaded")
- heartbeatSendFailures.Inc()
- }
- }
- }
- return ms
-}
-
-func (r *raftNode) apply() chan apply {
- return r.applyc
-}
-
-func (r *raftNode) stop() {
- r.stopped <- struct{}{}
- <-r.done
-}
-
-func (r *raftNode) onStop() {
- r.Stop()
- r.ticker.Stop()
- r.transport.Stop()
- if err := r.storage.Close(); err != nil {
- plog.Panicf("raft close storage error: %v", err)
- }
- close(r.done)
-}
-
-// for testing
-func (r *raftNode) pauseSending() {
- p := r.transport.(rafthttp.Pausable)
- p.Pause()
-}
-
-func (r *raftNode) resumeSending() {
- p := r.transport.(rafthttp.Pausable)
- p.Resume()
-}
-
-// advanceTicks advances ticks of Raft node.
-// This can be used for fast-forwarding election
-// ticks in multi data-center deployments, thus
-// speeding up election process.
-func (r *raftNode) advanceTicks(ticks int) {
- for i := 0; i < ticks; i++ {
- r.tick()
- }
-}
-
-func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
- var err error
- member := cl.MemberByName(cfg.Name)
- metadata := pbutil.MustMarshal(
- &pb.Metadata{
- NodeID: uint64(member.ID),
- ClusterID: uint64(cl.ID()),
- },
- )
- if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
- plog.Fatalf("create wal error: %v", err)
- }
- peers := make([]raft.Peer, len(ids))
- for i, id := range ids {
- ctx, err := json.Marshal((*cl).Member(id))
- if err != nil {
- plog.Panicf("marshal member should never fail: %v", err)
- }
- peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
- }
- id = member.ID
- plog.Infof("starting member %s in cluster %s", id, cl.ID())
- s = raft.NewMemoryStorage()
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- }
-
- n = raft.StartNode(c, peers)
- raftStatusMu.Lock()
- raftStatus = n.Status
- raftStatusMu.Unlock()
- return id, n, s, w
-}
-
-func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
- var walsnap walpb.Snapshot
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
-
- plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
- cl := membership.NewCluster("")
- cl.SetID(cid)
- s := raft.NewMemoryStorage()
- if snapshot != nil {
- s.ApplySnapshot(*snapshot)
- }
- s.SetHardState(st)
- s.Append(ents)
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- }
-
- n := raft.RestartNode(c)
- raftStatusMu.Lock()
- raftStatus = n.Status
- raftStatusMu.Unlock()
- return id, cl, n, s, w
-}
-
-func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
- var walsnap walpb.Snapshot
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
-
- // discard the previously uncommitted entries
- for i, ent := range ents {
- if ent.Index > st.Commit {
- plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
- ents = ents[:i]
- break
- }
- }
-
- // force append the configuration change entries
- toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
- ents = append(ents, toAppEnts...)
-
- // force commit newly appended entries
- err := w.Save(raftpb.HardState{}, toAppEnts)
- if err != nil {
- plog.Fatalf("%v", err)
- }
- if len(ents) != 0 {
- st.Commit = ents[len(ents)-1].Index
- }
-
- plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
- cl := membership.NewCluster("")
- cl.SetID(cid)
- s := raft.NewMemoryStorage()
- if snapshot != nil {
- s.ApplySnapshot(*snapshot)
- }
- s.SetHardState(st)
- s.Append(ents)
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- }
- n := raft.RestartNode(c)
- raftStatus = n.Status
- return id, cl, n, s, w
-}
-
-// getIDs returns an ordered set of IDs included in the given snapshot and
-// the entries. The given snapshot/entries can contain two kinds of
-// ID-related entry:
-// - ConfChangeAddNode, in which case the contained ID will be added into the set.
-// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
-func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
- ids := make(map[uint64]bool)
- if snap != nil {
- for _, id := range snap.Metadata.ConfState.Nodes {
- ids[id] = true
- }
- }
- for _, e := range ents {
- if e.Type != raftpb.EntryConfChange {
- continue
- }
- var cc raftpb.ConfChange
- pbutil.MustUnmarshal(&cc, e.Data)
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- ids[cc.NodeID] = true
- case raftpb.ConfChangeRemoveNode:
- delete(ids, cc.NodeID)
- case raftpb.ConfChangeUpdateNode:
- // do nothing
- default:
- plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
- }
- }
- sids := make(types.Uint64Slice, 0, len(ids))
- for id := range ids {
- sids = append(sids, id)
- }
- sort.Sort(sids)
- return []uint64(sids)
-}
-
-// createConfigChangeEnts creates a series of Raft entries (i.e.
-// EntryConfChange) to remove the set of given IDs from the cluster. The ID
-// `self` is _not_ removed, even if present in the set.
-// If `self` is not inside the given ids, it creates a Raft entry to add a
-// default member with the given `self`.
-func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
- ents := make([]raftpb.Entry, 0)
- next := index + 1
- found := false
- for _, id := range ids {
- if id == self {
- found = true
- continue
- }
- cc := &raftpb.ConfChange{
- Type: raftpb.ConfChangeRemoveNode,
- NodeID: id,
- }
- e := raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Data: pbutil.MustMarshal(cc),
- Term: term,
- Index: next,
- }
- ents = append(ents, e)
- next++
- }
- if !found {
- m := membership.Member{
- ID: types.ID(self),
- RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
- }
- ctx, err := json.Marshal(m)
- if err != nil {
- plog.Panicf("marshal member should never fail: %v", err)
- }
- cc := &raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: self,
- Context: ctx,
- }
- e := raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Data: pbutil.MustMarshal(cc),
- Term: term,
- Index: next,
- }
- ents = append(ents, e)
- }
- return ents
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go
deleted file mode 100644
index 71e2bcf..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/server.go
+++ /dev/null
@@ -1,1745 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "encoding/json"
- "expvar"
- "fmt"
- "math"
- "math/rand"
- "net/http"
- "os"
- "path"
- "regexp"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/coreos/etcd/alarm"
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/compactor"
- "github.com/coreos/etcd/discovery"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/lease/leasehttp"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/fileutil"
- "github.com/coreos/etcd/pkg/idutil"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/pkg/schedule"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/pkg/wait"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/rafthttp"
- "github.com/coreos/etcd/snap"
- "github.com/coreos/etcd/store"
- "github.com/coreos/etcd/version"
- "github.com/coreos/etcd/wal"
-
- "github.com/coreos/go-semver/semver"
- "github.com/coreos/pkg/capnslog"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-const (
- DefaultSnapCount = 100000
-
- StoreClusterPrefix = "/0"
- StoreKeysPrefix = "/1"
-
- // HealthInterval is the minimum time the cluster should be healthy
- // before accepting add member requests.
- HealthInterval = 5 * time.Second
-
- purgeFileInterval = 30 * time.Second
- // monitorVersionInterval should be smaller than the timeout
- // on the connection. Or we will not be able to reuse the connection
- // (since it will timeout).
- monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
-
- // max number of in-flight snapshot messages etcdserver allows to have
- // This number is more than enough for most clusters with 5 machines.
- maxInFlightMsgSnap = 16
-
- releaseDelayAfterSnapshot = 30 * time.Second
-
- // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
- maxPendingRevokes = 16
-
- recommendedMaxRequestBytes = 10 * 1024 * 1024
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
-
- storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
-)
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-
- expvar.Publish(
- "file_descriptor_limit",
- expvar.Func(
- func() interface{} {
- n, _ := runtime.FDLimit()
- return n
- },
- ),
- )
-}
-
-type Response struct {
- Term uint64
- Index uint64
- Event *store.Event
- Watcher store.Watcher
- Err error
-}
-
-type ServerV2 interface {
- Server
- // Do takes a V2 request and attempts to fulfill it, returning a Response.
- Do(ctx context.Context, r pb.Request) (Response, error)
- stats.Stats
- ClientCertAuthEnabled() bool
-}
-
-type ServerV3 interface {
- Server
- ID() types.ID
- RaftTimer
-}
-
-func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
-
-type Server interface {
- // Leader returns the ID of the leader Server.
- Leader() types.ID
-
- // AddMember attempts to add a member into the cluster. It will return
- // ErrIDRemoved if member ID is removed from the cluster, or return
- // ErrIDExists if member ID exists in the cluster.
- AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
- // RemoveMember attempts to remove a member from the cluster. It will
- // return ErrIDRemoved if member ID is removed from the cluster, or return
- // ErrIDNotFound if member ID is not in the cluster.
- RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
- // UpdateMember attempts to update an existing member in the cluster. It will
- // return ErrIDNotFound if the member ID does not exist.
- UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
-
- // ClusterVersion is the cluster-wide minimum major.minor version.
- // Cluster version is set to the min version that an etcd member is
- // compatible with when first bootstrap.
- //
- // ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
- //
- // During a rolling upgrades, the ClusterVersion will be updated
- // automatically after a sync. (5 second by default)
- //
- // The API/raft component can utilize ClusterVersion to determine if
- // it can accept a client request or a raft RPC.
- // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
- // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
- // this feature is introduced post 2.0.
- ClusterVersion() *semver.Version
- Cluster() api.Cluster
- Alarms() []*pb.AlarmMember
-}
-
-// EtcdServer is the production implementation of the Server interface
-type EtcdServer struct {
- // inflightSnapshots holds count the number of snapshots currently inflight.
- inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
- appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
- committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
- // consistIndex used to hold the offset of current executing entry
- // It is initialized to 0 before executing any entry.
- consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
- r raftNode // uses 64-bit atomics; keep 64-bit aligned.
-
- readych chan struct{}
- Cfg ServerConfig
-
- w wait.Wait
-
- readMu sync.RWMutex
- // read routine notifies etcd server that it waits for reading by sending an empty struct to
- // readwaitC
- readwaitc chan struct{}
- // readNotifier is used to notify the read routine that it can process the request
- // when there is no error
- readNotifier *notifier
-
- // stop signals the run goroutine should shutdown.
- stop chan struct{}
- // stopping is closed by run goroutine on shutdown.
- stopping chan struct{}
- // done is closed when all goroutines from start() complete.
- done chan struct{}
-
- errorc chan error
- id types.ID
- attributes membership.Attributes
-
- cluster *membership.RaftCluster
-
- store store.Store
- snapshotter *snap.Snapshotter
-
- applyV2 ApplierV2
-
- // applyV3 is the applier with auth and quotas
- applyV3 applierV3
- // applyV3Base is the core applier without auth or quotas
- applyV3Base applierV3
- applyWait wait.WaitTime
-
- kv mvcc.ConsistentWatchableKV
- lessor lease.Lessor
- bemu sync.Mutex
- be backend.Backend
- authStore auth.AuthStore
- alarmStore *alarm.AlarmStore
-
- stats *stats.ServerStats
- lstats *stats.LeaderStats
-
- SyncTicker *time.Ticker
- // compactor is used to auto-compact the KV.
- compactor compactor.Compactor
-
- // peerRt used to send requests (version, lease) to peers.
- peerRt http.RoundTripper
- reqIDGen *idutil.Generator
-
- // forceVersionC is used to force the version monitor loop
- // to detect the cluster version immediately.
- forceVersionC chan struct{}
-
- // wgMu blocks concurrent waitgroup mutation while server stopping
- wgMu sync.RWMutex
- // wg is used to wait for the go routines that depends on the server state
- // to exit when stopping the server.
- wg sync.WaitGroup
-
- // ctx is used for etcd-initiated requests that may need to be canceled
- // on etcd server shutdown.
- ctx context.Context
- cancel context.CancelFunc
-
- leadTimeMu sync.RWMutex
- leadElectedTime time.Time
-}
-
-// NewServer creates a new EtcdServer from the supplied configuration. The
-// configuration is considered static for the lifetime of the EtcdServer.
-func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
- st := store.New(StoreClusterPrefix, StoreKeysPrefix)
-
- var (
- w *wal.WAL
- n raft.Node
- s *raft.MemoryStorage
- id types.ID
- cl *membership.RaftCluster
- )
-
- if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
- plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
- }
-
- if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
- return nil, fmt.Errorf("cannot access data directory: %v", terr)
- }
-
- haveWAL := wal.Exist(cfg.WALDir())
-
- if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
- plog.Fatalf("create snapshot directory error: %v", err)
- }
- ss := snap.New(cfg.SnapDir())
-
- bepath := cfg.backendPath()
- beExist := fileutil.Exist(bepath)
- be := openBackend(cfg)
-
- defer func() {
- if err != nil {
- be.Close()
- }
- }()
-
- prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
- if err != nil {
- return nil, err
- }
- var (
- remotes []*membership.Member
- snapshot *raftpb.Snapshot
- )
-
- switch {
- case !haveWAL && !cfg.NewCluster:
- if err = cfg.VerifyJoinExisting(); err != nil {
- return nil, err
- }
- cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
- if err != nil {
- return nil, err
- }
- existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
- if gerr != nil {
- return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
- }
- if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
- return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
- }
- if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
- return nil, fmt.Errorf("incompatible with current running cluster")
- }
-
- remotes = existingCluster.Members()
- cl.SetID(existingCluster.ID())
- cl.SetStore(st)
- cl.SetBackend(be)
- cfg.Print()
- id, n, s, w = startNode(cfg, cl, nil)
- case !haveWAL && cfg.NewCluster:
- if err = cfg.VerifyBootstrap(); err != nil {
- return nil, err
- }
- cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
- if err != nil {
- return nil, err
- }
- m := cl.MemberByName(cfg.Name)
- if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
- return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
- }
- if cfg.ShouldDiscover() {
- var str string
- str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
- if err != nil {
- return nil, &DiscoveryError{Op: "join", Err: err}
- }
- var urlsmap types.URLsMap
- urlsmap, err = types.NewURLsMap(str)
- if err != nil {
- return nil, err
- }
- if checkDuplicateURL(urlsmap) {
- return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
- }
- if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
- return nil, err
- }
- }
- cl.SetStore(st)
- cl.SetBackend(be)
- cfg.PrintWithInitial()
- id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
- case haveWAL:
- if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
- return nil, fmt.Errorf("cannot write to member directory: %v", err)
- }
-
- if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
- return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
- }
-
- if cfg.ShouldDiscover() {
- plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
- }
- snapshot, err = ss.Load()
- if err != nil && err != snap.ErrNoSnapshot {
- return nil, err
- }
- if snapshot != nil {
- if err = st.Recovery(snapshot.Data); err != nil {
- plog.Panicf("recovered store from snapshot error: %v", err)
- }
- plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
- if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
- plog.Panicf("recovering backend from snapshot error: %v", err)
- }
- }
- cfg.Print()
- if !cfg.ForceNewCluster {
- id, cl, n, s, w = restartNode(cfg, snapshot)
- } else {
- id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
- }
- cl.SetStore(st)
- cl.SetBackend(be)
- cl.Recover(api.UpdateCapability)
- if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
- os.RemoveAll(bepath)
- return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
- }
- default:
- return nil, fmt.Errorf("unsupported bootstrap config")
- }
-
- if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
- return nil, fmt.Errorf("cannot access member directory: %v", terr)
- }
-
- sstats := stats.NewServerStats(cfg.Name, id.String())
- lstats := stats.NewLeaderStats(id.String())
-
- heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
- srv = &EtcdServer{
- readych: make(chan struct{}),
- Cfg: cfg,
- errorc: make(chan error, 1),
- store: st,
- snapshotter: ss,
- r: *newRaftNode(
- raftNodeConfig{
- isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
- Node: n,
- heartbeat: heartbeat,
- raftStorage: s,
- storage: NewStorage(w, ss),
- },
- ),
- id: id,
- attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
- cluster: cl,
- stats: sstats,
- lstats: lstats,
- SyncTicker: time.NewTicker(500 * time.Millisecond),
- peerRt: prt,
- reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
- forceVersionC: make(chan struct{}),
- }
- serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
-
- srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
-
- srv.be = be
- minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
-
- // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
- // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
- srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
- srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
- if beExist {
- kvindex := srv.kv.ConsistentIndex()
- // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
- // etcd from pre-3.0 release.
- if snapshot != nil && kvindex < snapshot.Metadata.Index {
- if kvindex != 0 {
- return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
- }
- plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
- }
- }
- newSrv := srv // since srv == nil in defer if srv is returned as nil
- defer func() {
- // closing backend without first closing kv can cause
- // resumed compactions to fail with closed tx errors
- if err != nil {
- newSrv.kv.Close()
- }
- }()
-
- srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
- tp, err := auth.NewTokenProvider(cfg.AuthToken,
- func(index uint64) <-chan struct{} {
- return srv.applyWait.Wait(index)
- },
- )
- if err != nil {
- plog.Errorf("failed to create token provider: %s", err)
- return nil, err
- }
- srv.authStore = auth.NewAuthStore(srv.be, tp)
- if num := cfg.AutoCompactionRetention; num != 0 {
- srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv)
- if err != nil {
- return nil, err
- }
- srv.compactor.Run()
- }
-
- srv.applyV3Base = srv.newApplierV3Backend()
- if err = srv.restoreAlarms(); err != nil {
- return nil, err
- }
-
- // TODO: move transport initialization near the definition of remote
- tr := &rafthttp.Transport{
- TLSInfo: cfg.PeerTLSInfo,
- DialTimeout: cfg.peerDialTimeout(),
- ID: id,
- URLs: cfg.PeerURLs,
- ClusterID: cl.ID(),
- Raft: srv,
- Snapshotter: ss,
- ServerStats: sstats,
- LeaderStats: lstats,
- ErrorC: srv.errorc,
- }
- if err = tr.Start(); err != nil {
- return nil, err
- }
- // add all remotes into transport
- for _, m := range remotes {
- if m.ID != id {
- tr.AddRemote(m.ID, m.PeerURLs)
- }
- }
- for _, m := range cl.Members() {
- if m.ID != id {
- tr.AddPeer(m.ID, m.PeerURLs)
- }
- }
- srv.r.transport = tr
-
- return srv, nil
-}
-
-func (s *EtcdServer) adjustTicks() {
- clusterN := len(s.cluster.Members())
-
- // single-node fresh start, or single-node recovers from snapshot
- if clusterN == 1 {
- ticks := s.Cfg.ElectionTicks - 1
- plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
- s.r.advanceTicks(ticks)
- return
- }
-
- if !s.Cfg.InitialElectionTickAdvance {
- plog.Infof("skipping initial election tick advance (election tick %d)", s.Cfg.ElectionTicks)
- return
- }
-
- // retry up to "rafthttp.ConnReadTimeout", which is 5-sec
- // until peer connection reports; otherwise:
- // 1. all connections failed, or
- // 2. no active peers, or
- // 3. restarted single-node with no snapshot
- // then, do nothing, because advancing ticks would have no effect
- waitTime := rafthttp.ConnReadTimeout
- itv := 50 * time.Millisecond
- for i := int64(0); i < int64(waitTime/itv); i++ {
- select {
- case <-time.After(itv):
- case <-s.stopping:
- return
- }
-
- peerN := s.r.transport.ActivePeers()
- if peerN > 1 {
- // multi-node received peer connection reports
- // adjust ticks, in case slow leader message receive
- ticks := s.Cfg.ElectionTicks - 2
-			plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
- s.r.advanceTicks(ticks)
- return
- }
- }
-}
-
-// Start performs any initialization of the Server necessary for it to
-// begin serving requests. It must be called before Do or Process.
-// Start must be non-blocking; any long-running server functionality
-// should be implemented in goroutines.
-func (s *EtcdServer) Start() {
- s.start()
- s.goAttach(func() { s.adjustTicks() })
- s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
- s.goAttach(s.purgeFile)
- s.goAttach(func() { monitorFileDescriptor(s.stopping) })
- s.goAttach(s.monitorVersions)
- s.goAttach(s.linearizableReadLoop)
- s.goAttach(s.monitorKVHash)
-}
-
-// start prepares and starts server in a new goroutine. It is no longer safe to
-// modify a server's fields after it has been sent to Start.
-// This function is just used for testing.
-func (s *EtcdServer) start() {
- if s.Cfg.SnapCount == 0 {
- plog.Infof("set snapshot count to default %d", DefaultSnapCount)
- s.Cfg.SnapCount = DefaultSnapCount
- }
- s.w = wait.New()
- s.applyWait = wait.NewTimeList()
- s.done = make(chan struct{})
- s.stop = make(chan struct{})
- s.stopping = make(chan struct{})
- s.ctx, s.cancel = context.WithCancel(context.Background())
- s.readwaitc = make(chan struct{}, 1)
- s.readNotifier = newNotifier()
- if s.ClusterVersion() != nil {
- plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
- } else {
- plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
- }
- // TODO: if this is an empty log, writes all peer infos
- // into the first entry
- go s.run()
-}
-
-func (s *EtcdServer) purgeFile() {
- var dberrc, serrc, werrc <-chan error
- if s.Cfg.MaxSnapFiles > 0 {
- dberrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
- serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
- }
- if s.Cfg.MaxWALFiles > 0 {
- werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
- }
- select {
- case e := <-dberrc:
- plog.Fatalf("failed to purge snap db file %v", e)
- case e := <-serrc:
- plog.Fatalf("failed to purge snap file %v", e)
- case e := <-werrc:
- plog.Fatalf("failed to purge wal file %v", e)
- case <-s.stopping:
- return
- }
-}
-
-func (s *EtcdServer) ID() types.ID { return s.id }
-
-func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
-
-func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
-
-type ServerPeer interface {
- ServerV2
- RaftHandler() http.Handler
- LeaseHandler() http.Handler
-}
-
-func (s *EtcdServer) LeaseHandler() http.Handler {
- if s.lessor == nil {
- return nil
- }
- return leasehttp.NewHandler(s.lessor, s.ApplyWait)
-}
-
-func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
-
-// Process takes a raft message and applies it to the server's raft state
-// machine, respecting any timeout of the given context.
-func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
- if s.cluster.IsIDRemoved(types.ID(m.From)) {
- plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
- return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
- }
- if m.Type == raftpb.MsgApp {
- s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
- }
- return s.r.Step(ctx, m)
-}
-
-func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
-
-func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
-
-// ReportSnapshot reports snapshot sent status to the raft state machine,
-// and clears the used snapshot from the snapshot store.
-func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
- s.r.ReportSnapshot(id, status)
-}
-
-type etcdProgress struct {
- confState raftpb.ConfState
- snapi uint64
- appliedt uint64
- appliedi uint64
-}
-
-// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
-// and helps decouple state machine logic from Raft algorithms.
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
-type raftReadyHandler struct {
- updateLeadership func(newLeader bool)
- updateCommittedIndex func(uint64)
-}
-
-func (s *EtcdServer) run() {
- sn, err := s.r.raftStorage.Snapshot()
- if err != nil {
- plog.Panicf("get snapshot from raft storage error: %v", err)
- }
-
- // asynchronously accept apply packets, dispatch progress in-order
- sched := schedule.NewFIFOScheduler()
-
- var (
- smu sync.RWMutex
- syncC <-chan time.Time
- )
- setSyncC := func(ch <-chan time.Time) {
- smu.Lock()
- syncC = ch
- smu.Unlock()
- }
- getSyncC := func() (ch <-chan time.Time) {
- smu.RLock()
- ch = syncC
- smu.RUnlock()
- return
- }
- rh := &raftReadyHandler{
- updateLeadership: func(newLeader bool) {
- if !s.isLeader() {
- if s.lessor != nil {
- s.lessor.Demote()
- }
- if s.compactor != nil {
- s.compactor.Pause()
- }
- setSyncC(nil)
- } else {
- if newLeader {
- t := time.Now()
- s.leadTimeMu.Lock()
- s.leadElectedTime = t
- s.leadTimeMu.Unlock()
- }
- setSyncC(s.SyncTicker.C)
- if s.compactor != nil {
- s.compactor.Resume()
- }
- }
-
- // TODO: remove the nil checking
- // current test utility does not provide the stats
- if s.stats != nil {
- s.stats.BecomeLeader()
- }
- },
- updateCommittedIndex: func(ci uint64) {
- cci := s.getCommittedIndex()
- if ci > cci {
- s.setCommittedIndex(ci)
- }
- },
- }
- s.r.start(rh)
-
- ep := etcdProgress{
- confState: sn.Metadata.ConfState,
- snapi: sn.Metadata.Index,
- appliedt: sn.Metadata.Term,
- appliedi: sn.Metadata.Index,
- }
-
- defer func() {
- s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
- close(s.stopping)
- s.wgMu.Unlock()
- s.cancel()
-
- sched.Stop()
-
-		// wait for goroutines before closing raft so wal stays open
- s.wg.Wait()
-
- s.SyncTicker.Stop()
-
- // must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
- // by adding a peer after raft stops the transport
- s.r.stop()
-
- // kv, lessor and backend can be nil if running without v3 enabled
- // or running unit tests.
- if s.lessor != nil {
- s.lessor.Stop()
- }
- if s.kv != nil {
- s.kv.Close()
- }
- if s.authStore != nil {
- s.authStore.Close()
- }
- if s.be != nil {
- s.be.Close()
- }
- if s.compactor != nil {
- s.compactor.Stop()
- }
- close(s.done)
- }()
-
- var expiredLeaseC <-chan []*lease.Lease
- if s.lessor != nil {
- expiredLeaseC = s.lessor.ExpiredLeasesC()
- }
-
- for {
- select {
- case ap := <-s.r.apply():
- f := func(context.Context) { s.applyAll(&ep, &ap) }
- sched.Schedule(f)
- case leases := <-expiredLeaseC:
- s.goAttach(func() {
- // Increases throughput of expired leases deletion process through parallelization
- c := make(chan struct{}, maxPendingRevokes)
- for _, lease := range leases {
- select {
- case c <- struct{}{}:
- case <-s.stopping:
- return
- }
- lid := lease.ID
- s.goAttach(func() {
- ctx := s.authStore.WithRoot(s.ctx)
- _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
- if lerr == nil {
- leaseExpired.Inc()
- } else {
- plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
- }
-
- <-c
- })
- }
- })
- case err := <-s.errorc:
- plog.Errorf("%s", err)
- plog.Infof("the data-dir used by this member must be removed.")
- return
- case <-getSyncC():
- if s.store.HasTTLKeys() {
- s.sync(s.Cfg.ReqTimeout())
- }
- case <-s.stop:
- return
- }
- }
-}
-
-func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
- s.applySnapshot(ep, apply)
- s.applyEntries(ep, apply)
-
- proposalsApplied.Set(float64(ep.appliedi))
- s.applyWait.Trigger(ep.appliedi)
- // wait for the raft routine to finish the disk writes before triggering a
- // snapshot. or applied index might be greater than the last index in raft
- // storage, since the raft routine might be slower than apply routine.
- <-apply.notifyc
-
- s.triggerSnapshot(ep)
- select {
- // snapshot requested via send()
- case m := <-s.r.msgSnapC:
- merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
- s.sendMergedSnap(merged)
- default:
- }
-}
-
-func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
- if raft.IsEmptySnap(apply.snapshot) {
- return
- }
-
- plog.Infof("applying snapshot at index %d...", ep.snapi)
- defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
-
- if apply.snapshot.Metadata.Index <= ep.appliedi {
- plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
- apply.snapshot.Metadata.Index, ep.appliedi)
- }
-
- // wait for raftNode to persist snapshot onto the disk
- <-apply.notifyc
-
- newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
- if err != nil {
- plog.Panic(err)
- }
-
- // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
- // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
- if s.lessor != nil {
- plog.Info("recovering lessor...")
- s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
- plog.Info("finished recovering lessor")
- }
-
- plog.Info("restoring mvcc store...")
-
- if err := s.kv.Restore(newbe); err != nil {
- plog.Panicf("restore KV error: %v", err)
- }
- s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
-
- plog.Info("finished restoring mvcc store")
-
- // Closing old backend might block until all the txns
- // on the backend are finished.
- // We do not want to wait on closing the old backend.
- s.bemu.Lock()
- oldbe := s.be
- go func() {
- plog.Info("closing old backend...")
- defer plog.Info("finished closing old backend")
-
- if err := oldbe.Close(); err != nil {
- plog.Panicf("close backend error: %v", err)
- }
- }()
-
- s.be = newbe
- s.bemu.Unlock()
-
- plog.Info("recovering alarms...")
- if err := s.restoreAlarms(); err != nil {
- plog.Panicf("restore alarms error: %v", err)
- }
- plog.Info("finished recovering alarms")
-
- if s.authStore != nil {
- plog.Info("recovering auth store...")
- s.authStore.Recover(newbe)
- plog.Info("finished recovering auth store")
- }
-
- plog.Info("recovering store v2...")
- if err := s.store.Recovery(apply.snapshot.Data); err != nil {
- plog.Panicf("recovery store error: %v", err)
- }
- plog.Info("finished recovering store v2")
-
- s.cluster.SetBackend(s.be)
- plog.Info("recovering cluster configuration...")
- s.cluster.Recover(api.UpdateCapability)
- plog.Info("finished recovering cluster configuration")
-
- plog.Info("removing old peers from network...")
- // recover raft transport
- s.r.transport.RemoveAllPeers()
- plog.Info("finished removing old peers from network")
-
- plog.Info("adding peers from new cluster configuration into network...")
- for _, m := range s.cluster.Members() {
- if m.ID == s.ID() {
- continue
- }
- s.r.transport.AddPeer(m.ID, m.PeerURLs)
- }
- plog.Info("finished adding peers from new cluster configuration into network...")
-
- ep.appliedt = apply.snapshot.Metadata.Term
- ep.appliedi = apply.snapshot.Metadata.Index
- ep.snapi = ep.appliedi
- ep.confState = apply.snapshot.Metadata.ConfState
-}
-
-func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
- if len(apply.entries) == 0 {
- return
- }
- firsti := apply.entries[0].Index
- if firsti > ep.appliedi+1 {
- plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
- }
- var ents []raftpb.Entry
- if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
- ents = apply.entries[ep.appliedi+1-firsti:]
- }
- if len(ents) == 0 {
- return
- }
- var shouldstop bool
- if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
- go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
- }
-}
-
-func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
- if ep.appliedi-ep.snapi <= s.Cfg.SnapCount {
- return
- }
-
- plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
- s.snapshot(ep.appliedi, ep.confState)
- ep.snapi = ep.appliedi
-}
-
-func (s *EtcdServer) isMultiNode() bool {
- return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
-}
-
-func (s *EtcdServer) isLeader() bool {
- return uint64(s.ID()) == s.Lead()
-}
-
-// MoveLeader transfers the leader to the given transferee.
-func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
- now := time.Now()
- interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
-
- plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
- s.r.TransferLeadership(ctx, lead, transferee)
- for s.Lead() != transferee {
- select {
- case <-ctx.Done(): // time out
- return ErrTimeoutLeaderTransfer
- case <-time.After(interval):
- }
- }
-
- // TODO: drain all requests, or drop all messages to the old leader
-
- plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
- return nil
-}
-
-// TransferLeadership transfers the leader to the chosen transferee.
-func (s *EtcdServer) TransferLeadership() error {
- if !s.isLeader() {
- plog.Printf("skipped leadership transfer for stopping non-leader member")
- return nil
- }
-
- if !s.isMultiNode() {
- plog.Printf("skipped leadership transfer for single member cluster")
- return nil
- }
-
- transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
- if !ok {
- return ErrUnhealthy
- }
-
- tm := s.Cfg.ReqTimeout()
- ctx, cancel := context.WithTimeout(s.ctx, tm)
- err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
- cancel()
- return err
-}
-
-// HardStop stops the server without coordination with other members in the cluster.
-func (s *EtcdServer) HardStop() {
- select {
- case s.stop <- struct{}{}:
- case <-s.done:
- return
- }
- <-s.done
-}
-
-// Stop stops the server gracefully, and shuts down the running goroutine.
-// Stop should be called after a Start(s), otherwise it will block forever.
-// When stopping leader, Stop transfers its leadership to one of its peers
-// before stopping the server.
-// Stop terminates the Server and performs any necessary finalization.
-// Do and Process cannot be called after Stop has been invoked.
-func (s *EtcdServer) Stop() {
- if err := s.TransferLeadership(); err != nil {
- plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
- }
- s.HardStop()
-}
-
-// ReadyNotify returns a channel that will be closed when the server
-// is ready to serve client requests
-func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
-
-func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
- select {
- case <-time.After(d):
- case <-s.done:
- }
- select {
- case s.errorc <- err:
- default:
- }
-}
-
-// StopNotify returns a channel that receives an empty struct
-// when the server is stopped.
-func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
-
-func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
-
-func (s *EtcdServer) LeaderStats() []byte {
- lead := atomic.LoadUint64(&s.r.lead)
- if lead != uint64(s.id) {
- return nil
- }
- return s.lstats.JSON()
-}
-
-func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
-
-func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
- if s.authStore == nil {
- // In the context of ordinary etcd process, s.authStore will never be nil.
- // This branch is for handling cases in server_test.go
- return nil
- }
-
- // Note that this permission check is done in the API layer,
- // so TOCTOU problem can be caused potentially in a schedule like this:
- // update membership with user A -> revoke root role of A -> apply membership change
- // in the state machine layer
- // However, both of membership change and role management requires the root privilege.
- // So careful operation by admins can prevent the problem.
- authInfo, err := s.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
-
- return s.AuthStore().IsAdminPermitted(authInfo)
-}
-
-func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- if err := s.checkMembershipOperationPermission(ctx); err != nil {
- return nil, err
- }
-
- if s.Cfg.StrictReconfigCheck {
- // by default StrictReconfigCheck is enabled; reject new members if unhealthy
- if !s.cluster.IsReadyToAddNewMember() {
- plog.Warningf("not enough started members, rejecting member add %+v", memb)
- return nil, ErrNotEnoughStartedMembers
- }
- if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
- plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
- return nil, ErrUnhealthy
- }
- }
-
- // TODO: move Member to protobuf type
- b, err := json.Marshal(memb)
- if err != nil {
- return nil, err
- }
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: uint64(memb.ID),
- Context: b,
- }
- return s.configure(ctx, cc)
-}
-
-func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- if err := s.checkMembershipOperationPermission(ctx); err != nil {
- return nil, err
- }
-
- // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss
- if err := s.mayRemoveMember(types.ID(id)); err != nil {
- return nil, err
- }
-
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeRemoveNode,
- NodeID: id,
- }
- return s.configure(ctx, cc)
-}
-
-func (s *EtcdServer) mayRemoveMember(id types.ID) error {
- if !s.Cfg.StrictReconfigCheck {
- return nil
- }
-
- if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
- plog.Warningf("not enough started members, rejecting remove member %s", id)
- return ErrNotEnoughStartedMembers
- }
-
- // downed member is safe to remove since it's not part of the active quorum
- if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
- return nil
- }
-
- // protect quorum if some members are down
- m := s.cluster.Members()
- active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
- if (active - 1) < 1+((len(m)-1)/2) {
- plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
- return ErrUnhealthy
- }
-
- return nil
-}
-
-func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- b, merr := json.Marshal(memb)
- if merr != nil {
- return nil, merr
- }
-
- if err := s.checkMembershipOperationPermission(ctx); err != nil {
- return nil, err
- }
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeUpdateNode,
- NodeID: uint64(memb.ID),
- Context: b,
- }
- return s.configure(ctx, cc)
-}
-
-// Implement the RaftTimer interface
-
-func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) }
-
-func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) }
-
-// Lead is only for testing purposes.
-// TODO: add Raft server interface to expose raft related info:
-// Index, Term, Lead, Committed, Applied, LastIndex, etc.
-func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
-
-func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
-
-type confChangeResponse struct {
- membs []*membership.Member
- err error
-}
-
-// configure sends a configuration change through consensus and
-// then waits for it to be applied to the server. It
-// will block until the change is performed or there is an error.
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
- cc.ID = s.reqIDGen.Next()
- ch := s.w.Register(cc.ID)
- start := time.Now()
- if err := s.r.ProposeConfChange(ctx, cc); err != nil {
- s.w.Trigger(cc.ID, nil)
- return nil, err
- }
- select {
- case x := <-ch:
- if x == nil {
- plog.Panicf("configure trigger value should never be nil")
- }
- resp := x.(*confChangeResponse)
- return resp.membs, resp.err
- case <-ctx.Done():
- s.w.Trigger(cc.ID, nil) // GC wait
- return nil, s.parseProposeCtxErr(ctx.Err(), start)
- case <-s.stopping:
- return nil, ErrStopped
- }
-}
-
-// sync proposes a SYNC request and is non-blocking.
-// This makes no guarantee that the request will be proposed or performed.
-// The request will be canceled after the given timeout.
-func (s *EtcdServer) sync(timeout time.Duration) {
- req := pb.Request{
- Method: "SYNC",
- ID: s.reqIDGen.Next(),
- Time: time.Now().UnixNano(),
- }
- data := pbutil.MustMarshal(&req)
-	// There is no guarantee that the node has a leader when making a SYNC request,
-	// so a goroutine is used to propose.
- ctx, cancel := context.WithTimeout(s.ctx, timeout)
- s.goAttach(func() {
- s.r.Propose(ctx, data)
- cancel()
- })
-}
-
-// publish registers server information into the cluster. The information
-// is the JSON representation of this server's member struct, updated with the
-// static clientURLs of the server.
-// The function keeps attempting to register until it succeeds,
-// or its server is stopped.
-func (s *EtcdServer) publish(timeout time.Duration) {
- b, err := json.Marshal(s.attributes)
- if err != nil {
- plog.Panicf("json marshal error: %v", err)
- return
- }
- req := pb.Request{
- Method: "PUT",
- Path: membership.MemberAttributesStorePath(s.id),
- Val: string(b),
- }
-
- for {
- ctx, cancel := context.WithTimeout(s.ctx, timeout)
- _, err := s.Do(ctx, req)
- cancel()
- switch err {
- case nil:
- close(s.readych)
- plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
- return
- case ErrStopped:
- plog.Infof("aborting publish because server is stopped")
- return
- default:
- plog.Errorf("publish error: %v", err)
- }
- }
-}
-
-func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
- atomic.AddInt64(&s.inflightSnapshots, 1)
-
- s.r.transport.SendSnapshot(merged)
- s.goAttach(func() {
- select {
- case ok := <-merged.CloseNotify():
- // delay releasing inflight snapshot for another 30 seconds to
- // block log compaction.
- // If the follower still fails to catch up, it is probably just too slow
- // to catch up. We cannot avoid the snapshot cycle anyway.
- if ok {
- select {
- case <-time.After(releaseDelayAfterSnapshot):
- case <-s.stopping:
- }
- }
- atomic.AddInt64(&s.inflightSnapshots, -1)
- case <-s.stopping:
- return
- }
- })
-}
-
-// apply takes entries received from Raft (after they have been committed) and
-// applies them to the current state of the EtcdServer.
-// The given entries should not be empty.
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
- for i := range es {
- e := es[i]
- switch e.Type {
- case raftpb.EntryNormal:
- s.applyEntryNormal(&e)
- case raftpb.EntryConfChange:
-			// set the consistent index of the currently executing entry
- if e.Index > s.consistIndex.ConsistentIndex() {
- s.consistIndex.setConsistentIndex(e.Index)
- }
- var cc raftpb.ConfChange
- pbutil.MustUnmarshal(&cc, e.Data)
- removedSelf, err := s.applyConfChange(cc, confState)
- s.setAppliedIndex(e.Index)
- shouldStop = shouldStop || removedSelf
- s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
- default:
- plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
- }
- atomic.StoreUint64(&s.r.index, e.Index)
- atomic.StoreUint64(&s.r.term, e.Term)
- appliedt = e.Term
- appliedi = e.Index
- }
- return appliedt, appliedi, shouldStop
-}
-
-// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
-func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
- shouldApplyV3 := false
- if e.Index > s.consistIndex.ConsistentIndex() {
-		// set the consistent index of the currently executing entry
- s.consistIndex.setConsistentIndex(e.Index)
- shouldApplyV3 = true
- }
- defer s.setAppliedIndex(e.Index)
-
-	// The raft state machine may generate a noop entry on leader confirmation.
-	// Skip it in advance to avoid potential bugs in the future.
- if len(e.Data) == 0 {
- select {
- case s.forceVersionC <- struct{}{}:
- default:
- }
-		// promote the lessor when the local member is the leader and has finished
-		// applying all entries from the last term.
- if s.isLeader() {
- s.lessor.Promote(s.Cfg.electionTimeout())
- }
- return
- }
-
- var raftReq pb.InternalRaftRequest
- if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
- var r pb.Request
- rp := &r
- pbutil.MustUnmarshal(rp, e.Data)
- s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
- return
- }
- if raftReq.V2 != nil {
- req := (*RequestV2)(raftReq.V2)
- s.w.Trigger(req.ID, s.applyV2Request(req))
- return
- }
-
- // do not re-apply applied entries.
- if !shouldApplyV3 {
- return
- }
-
- id := raftReq.ID
- if id == 0 {
- id = raftReq.Header.ID
- }
-
- var ar *applyResult
- needResult := s.w.IsRegistered(id)
- if needResult || !noSideEffect(&raftReq) {
- if !needResult && raftReq.Txn != nil {
- removeNeedlessRangeReqs(raftReq.Txn)
- }
- ar = s.applyV3.Apply(&raftReq)
- }
-
- if ar == nil {
- return
- }
-
- if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
- s.w.Trigger(id, ar)
- return
- }
-
- plog.Errorf("applying raft message exceeded backend quota")
- s.goAttach(func() {
- a := &pb.AlarmRequest{
- MemberID: uint64(s.ID()),
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_NOSPACE,
- }
- s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
- s.w.Trigger(id, ar)
- })
-}
-
-// applyConfChange applies a ConfChange to the server. It is only
-// invoked with a ConfChange that has already passed through Raft
-func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
- if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
- cc.NodeID = raft.None
- s.r.ApplyConfChange(cc)
- return false, err
- }
- *confState = *s.r.ApplyConfChange(cc)
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- m := new(membership.Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- if cc.NodeID != uint64(m.ID) {
- plog.Panicf("nodeID should always be equal to member ID")
- }
- s.cluster.AddMember(m)
- if m.ID != s.id {
- s.r.transport.AddPeer(m.ID, m.PeerURLs)
- }
- case raftpb.ConfChangeRemoveNode:
- id := types.ID(cc.NodeID)
- s.cluster.RemoveMember(id)
- if id == s.id {
- return true, nil
- }
- s.r.transport.RemovePeer(id)
- case raftpb.ConfChangeUpdateNode:
- m := new(membership.Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- if cc.NodeID != uint64(m.ID) {
- plog.Panicf("nodeID should always be equal to member ID")
- }
- s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
- if m.ID != s.id {
- s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
- }
- }
- return false, nil
-}
-
-// TODO: non-blocking snapshot
-func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
- clone := s.store.Clone()
- // commit kv to write metadata (for example: consistent index) to disk.
- // KV().commit() updates the consistent index in backend.
- // All operations that update consistent index must be called sequentially
- // from applyAll function.
- // So KV().Commit() cannot run in parallel with apply. It has to be called outside
- // the go routine created below.
- s.KV().Commit()
-
- s.goAttach(func() {
- d, err := clone.SaveNoCopy()
- // TODO: current store will never fail to do a snapshot
- // what should we do if the store might fail?
- if err != nil {
- plog.Panicf("store save should never fail: %v", err)
- }
- snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
- if err != nil {
- // the snapshot was done asynchronously with the progress of raft.
- // raft might have already got a newer snapshot.
- if err == raft.ErrSnapOutOfDate {
- return
- }
- plog.Panicf("unexpected create snapshot error %v", err)
- }
- // SaveSnap saves the snapshot and releases the locked wal files
- // to the snapshot index.
- if err = s.r.storage.SaveSnap(snap); err != nil {
- plog.Fatalf("save snapshot error: %v", err)
- }
- plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
-
-		// When sending a snapshot, etcd pauses compaction.
-		// After receiving a snapshot, the slow follower needs all the entries right after
-		// the snapshot it received in order to catch up. If we do not pause compaction, the log entries right after
-		// the sent snapshot might already be compacted. This happens when the snapshot takes a long time
-		// to send and save. Pausing compaction avoids triggering a snapshot-sending cycle.
- if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
- plog.Infof("skip compaction since there is an inflight snapshot")
- return
- }
-
- // keep some in memory log entries for slow followers.
- compacti := uint64(1)
- if snapi > numberOfCatchUpEntries {
- compacti = snapi - numberOfCatchUpEntries
- }
- err = s.r.raftStorage.Compact(compacti)
- if err != nil {
- // the compaction was done asynchronously with the progress of raft.
-			// the raft log might have already been compacted.
- if err == raft.ErrCompacted {
- return
- }
- plog.Panicf("unexpected compaction error %v", err)
- }
- plog.Infof("compacted raft log at %d", compacti)
- })
-}
-
-// CutPeer drops messages to the specified peer.
-func (s *EtcdServer) CutPeer(id types.ID) {
- tr, ok := s.r.transport.(*rafthttp.Transport)
- if ok {
- tr.CutPeer(id)
- }
-}
-
-// MendPeer recovers the message dropping behavior of the given peer.
-func (s *EtcdServer) MendPeer(id types.ID) {
- tr, ok := s.r.transport.(*rafthttp.Transport)
- if ok {
- tr.MendPeer(id)
- }
-}
-
-func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
-
-func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
-
-func (s *EtcdServer) ClusterVersion() *semver.Version {
- if s.cluster == nil {
- return nil
- }
- return s.cluster.Version()
-}
-
-// monitorVersions checks the member's version every monitorVersionInterval.
-// It updates the cluster version if all members agree on a higher one.
-// It logs a message if there is a member with a higher version than the
-// local version.
-func (s *EtcdServer) monitorVersions() {
- for {
- select {
- case <-s.forceVersionC:
- case <-time.After(monitorVersionInterval):
- case <-s.stopping:
- return
- }
-
- if s.Leader() != s.ID() {
- continue
- }
-
- v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt))
- if v != nil {
- // only keep major.minor version for comparison
- v = &semver.Version{
- Major: v.Major,
- Minor: v.Minor,
- }
- }
-
- // if the current version is nil:
- // 1. use the decided version if possible
- // 2. or use the min cluster version
- if s.cluster.Version() == nil {
- verStr := version.MinClusterVersion
- if v != nil {
- verStr = v.String()
- }
- s.goAttach(func() { s.updateClusterVersion(verStr) })
- continue
- }
-
- // update cluster version only if the decided version is greater than
- // the current cluster version
- if v != nil && s.cluster.Version().LessThan(*v) {
- s.goAttach(func() { s.updateClusterVersion(v.String()) })
- }
- }
-}
-
-func (s *EtcdServer) updateClusterVersion(ver string) {
- if s.cluster.Version() == nil {
- plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
- } else {
- plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
- }
- req := pb.Request{
- Method: "PUT",
- Path: membership.StoreClusterVersionKey(),
- Val: ver,
- }
- ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
- _, err := s.Do(ctx, req)
- cancel()
- switch err {
- case nil:
- return
- case ErrStopped:
- plog.Infof("aborting update cluster version because server is stopped")
- return
- default:
- plog.Errorf("error updating cluster version (%v)", err)
- }
-}
-
-func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
- switch err {
- case context.Canceled:
- return ErrCanceled
- case context.DeadlineExceeded:
- s.leadTimeMu.RLock()
- curLeadElected := s.leadElectedTime
- s.leadTimeMu.RUnlock()
- prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
- if start.After(prevLeadLost) && start.Before(curLeadElected) {
- return ErrTimeoutDueToLeaderFail
- }
-
- lead := types.ID(atomic.LoadUint64(&s.r.lead))
- switch lead {
- case types.ID(raft.None):
- // TODO: return error to specify it happens because the cluster does not have leader now
- case s.ID():
- if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
- return ErrTimeoutDueToConnectionLost
- }
- default:
- if !isConnectedSince(s.r.transport, start, lead) {
- return ErrTimeoutDueToConnectionLost
- }
- }
-
- return ErrTimeout
- default:
- return err
- }
-}
-
-func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }
-func (s *EtcdServer) Backend() backend.Backend {
- s.bemu.Lock()
- defer s.bemu.Unlock()
- return s.be
-}
-
-func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
-
-func (s *EtcdServer) restoreAlarms() error {
- s.applyV3 = s.newApplierV3()
- as, err := alarm.NewAlarmStore(s)
- if err != nil {
- return err
- }
- s.alarmStore = as
- if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
- s.applyV3 = newApplierV3Capped(s.applyV3)
- }
- if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
- s.applyV3 = newApplierV3Corrupt(s.applyV3)
- }
- return nil
-}
-
-func (s *EtcdServer) getAppliedIndex() uint64 {
- return atomic.LoadUint64(&s.appliedIndex)
-}
-
-func (s *EtcdServer) setAppliedIndex(v uint64) {
- atomic.StoreUint64(&s.appliedIndex, v)
-}
-
-func (s *EtcdServer) getCommittedIndex() uint64 {
- return atomic.LoadUint64(&s.committedIndex)
-}
-
-func (s *EtcdServer) setCommittedIndex(v uint64) {
- atomic.StoreUint64(&s.committedIndex, v)
-}
-
-// goAttach creates a goroutine on a given function and tracks it using
-// the etcdserver waitgroup.
-func (s *EtcdServer) goAttach(f func()) {
- s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
- defer s.wgMu.RUnlock()
- select {
- case <-s.stopping:
- plog.Warning("server has stopped (skipping goAttach)")
- return
- default:
- }
-
- // now safe to add since waitgroup wait has not started yet
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- f()
- }()
-}
-
-func (s *EtcdServer) Alarms() []*pb.AlarmMember {
- return s.alarmStore.Get(pb.AlarmType_NONE)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
deleted file mode 100644
index 928aa95..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "io"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
-)
-
-// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
-// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
-// as ReadCloser.
-func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
- // get a snapshot of v2 store as []byte
- clone := s.store.Clone()
- d, err := clone.SaveNoCopy()
- if err != nil {
- plog.Panicf("store save should never fail: %v", err)
- }
-
-	// commit kv to write metadata (for example: consistent index).
- s.KV().Commit()
- dbsnap := s.be.Snapshot()
- // get a snapshot of v3 KV as readCloser
- rc := newSnapshotReaderCloser(dbsnap)
-
-	// put the []byte snapshot of the store into the raft snapshot and return the merged snapshot with
-	// the KV readCloser snapshot.
- snapshot := raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- Index: snapi,
- Term: snapt,
- ConfState: confState,
- },
- Data: d,
- }
- m.Snapshot = snapshot
-
- return *snap.NewMessage(m, rc, dbsnap.Size())
-}
-
-func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
- pr, pw := io.Pipe()
- go func() {
- n, err := snapshot.WriteTo(pw)
- if err == nil {
- plog.Infof("wrote database snapshot out [total bytes: %d]", n)
- } else {
- plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
- }
- pw.CloseWithError(err)
- err = snapshot.Close()
- if err != nil {
- plog.Panicf("failed to close database snapshot: %v", err)
- }
- }()
- return pr
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
deleted file mode 100644
index 8f6a54f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
- "encoding/json"
- "math"
- "sync"
- "time"
-)
-
-// LeaderStats is used by the leader in an etcd cluster, and encapsulates
-// statistics about communication with its followers
-type LeaderStats struct {
- leaderStats
- sync.Mutex
-}
-
-type leaderStats struct {
- // Leader is the ID of the leader in the etcd cluster.
- // TODO(jonboulle): clarify that these are IDs, not names
- Leader string `json:"leader"`
- Followers map[string]*FollowerStats `json:"followers"`
-}
-
-// NewLeaderStats generates a new LeaderStats with the given id as leader
-func NewLeaderStats(id string) *LeaderStats {
- return &LeaderStats{
- leaderStats: leaderStats{
- Leader: id,
- Followers: make(map[string]*FollowerStats),
- },
- }
-}
-
-func (ls *LeaderStats) JSON() []byte {
- ls.Lock()
- stats := ls.leaderStats
- ls.Unlock()
- b, err := json.Marshal(stats)
- // TODO(jonboulle): appropriate error handling?
- if err != nil {
- plog.Errorf("error marshalling leader stats (%v)", err)
- }
- return b
-}
-
-func (ls *LeaderStats) Follower(name string) *FollowerStats {
- ls.Lock()
- defer ls.Unlock()
- fs, ok := ls.Followers[name]
- if !ok {
- fs = &FollowerStats{}
- fs.Latency.Minimum = 1 << 63
- ls.Followers[name] = fs
- }
- return fs
-}
-
-// FollowerStats encapsulates various statistics about a follower in an etcd cluster
-type FollowerStats struct {
- Latency LatencyStats `json:"latency"`
- Counts CountsStats `json:"counts"`
-
- sync.Mutex
-}
-
-// LatencyStats encapsulates latency statistics.
-type LatencyStats struct {
- Current float64 `json:"current"`
- Average float64 `json:"average"`
- averageSquare float64
- StandardDeviation float64 `json:"standardDeviation"`
- Minimum float64 `json:"minimum"`
- Maximum float64 `json:"maximum"`
-}
-
-// CountsStats encapsulates raft statistics.
-type CountsStats struct {
- Fail uint64 `json:"fail"`
- Success uint64 `json:"success"`
-}
-
-// Succ updates the FollowerStats with a successful send
-func (fs *FollowerStats) Succ(d time.Duration) {
- fs.Lock()
- defer fs.Unlock()
-
- total := float64(fs.Counts.Success) * fs.Latency.Average
- totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare
-
- fs.Counts.Success++
-
- fs.Latency.Current = float64(d) / (1000000.0)
-
- if fs.Latency.Current > fs.Latency.Maximum {
- fs.Latency.Maximum = fs.Latency.Current
- }
-
- if fs.Latency.Current < fs.Latency.Minimum {
- fs.Latency.Minimum = fs.Latency.Current
- }
-
- fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success)
- fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success)
-
- // sdv = sqrt(avg(x^2) - avg(x)^2)
- fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average)
-}
-
-// Fail updates the FollowerStats with an unsuccessful send
-func (fs *FollowerStats) Fail() {
- fs.Lock()
- defer fs.Unlock()
- fs.Counts.Fail++
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go
deleted file mode 100644
index 635074c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
- "sync"
- "time"
-)
-
-const (
- queueCapacity = 200
-)
-
-// RequestStats represents the stats for a request.
-// It encapsulates the sending time and the size of the request.
-type RequestStats struct {
- SendingTime time.Time
- Size int
-}
-
-type statsQueue struct {
- items [queueCapacity]*RequestStats
- size int
- front int
- back int
- totalReqSize int
- rwl sync.RWMutex
-}
-
-func (q *statsQueue) Len() int {
- return q.size
-}
-
-func (q *statsQueue) ReqSize() int {
- return q.totalReqSize
-}
-
-// frontAndBack gets the front and back elements in the queue.
-// We must grab front and back together under the protection of the lock.
-func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) {
- q.rwl.RLock()
- defer q.rwl.RUnlock()
- if q.size != 0 {
- return q.items[q.front], q.items[q.back]
- }
- return nil, nil
-}
-
-// Insert inserts a RequestStats into the queue and updates the records.
-func (q *statsQueue) Insert(p *RequestStats) {
- q.rwl.Lock()
- defer q.rwl.Unlock()
-
- q.back = (q.back + 1) % queueCapacity
-
- if q.size == queueCapacity { //dequeue
- q.totalReqSize -= q.items[q.front].Size
- q.front = (q.back + 1) % queueCapacity
- } else {
- q.size++
- }
-
- q.items[q.back] = p
- q.totalReqSize += q.items[q.back].Size
-
-}
-
-// Rate returns the package rate and byte rate.
-func (q *statsQueue) Rate() (float64, float64) {
- front, back := q.frontAndBack()
-
- if front == nil || back == nil {
- return 0, 0
- }
-
- if time.Since(back.SendingTime) > time.Second {
- q.Clear()
- return 0, 0
- }
-
- sampleDuration := back.SendingTime.Sub(front.SendingTime)
-
- pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)
-
- br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second)
-
- return pr, br
-}
-
-// Clear clears the statsQueue.
-func (q *statsQueue) Clear() {
- q.rwl.Lock()
- defer q.rwl.Unlock()
- q.back = -1
- q.front = 0
- q.size = 0
- q.totalReqSize = 0
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go
deleted file mode 100644
index b026e44..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
- "encoding/json"
- "log"
- "sync"
- "time"
-
- "github.com/coreos/etcd/raft"
-)
-
-// ServerStats encapsulates various statistics about an EtcdServer and its
-// communication with other members of the cluster
-type ServerStats struct {
- serverStats
- sync.Mutex
-}
-
-func NewServerStats(name, id string) *ServerStats {
- ss := &ServerStats{
- serverStats: serverStats{
- Name: name,
- ID: id,
- },
- }
- now := time.Now()
- ss.StartTime = now
- ss.LeaderInfo.StartTime = now
- ss.sendRateQueue = &statsQueue{back: -1}
- ss.recvRateQueue = &statsQueue{back: -1}
- return ss
-}
-
-type serverStats struct {
- Name string `json:"name"`
- // ID is the raft ID of the node.
- // TODO(jonboulle): use ID instead of name?
- ID string `json:"id"`
- State raft.StateType `json:"state"`
- StartTime time.Time `json:"startTime"`
-
- LeaderInfo struct {
- Name string `json:"leader"`
- Uptime string `json:"uptime"`
- StartTime time.Time `json:"startTime"`
- } `json:"leaderInfo"`
-
- RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"`
- RecvingPkgRate float64 `json:"recvPkgRate,omitempty"`
- RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"`
-
- SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"`
- SendingPkgRate float64 `json:"sendPkgRate,omitempty"`
- SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"`
-
- sendRateQueue *statsQueue
- recvRateQueue *statsQueue
-}
-
-func (ss *ServerStats) JSON() []byte {
- ss.Lock()
- stats := ss.serverStats
- stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
- stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
- stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
- ss.Unlock()
- b, err := json.Marshal(stats)
- // TODO(jonboulle): appropriate error handling?
- if err != nil {
- log.Printf("stats: error marshalling server stats: %v", err)
- }
- return b
-}
-
-// RecvAppendReq updates the ServerStats in response to receiving an
-// AppendRequest from the given leader.
-func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
- ss.Lock()
- defer ss.Unlock()
-
- now := time.Now()
-
- ss.State = raft.StateFollower
- if leader != ss.LeaderInfo.Name {
- ss.LeaderInfo.Name = leader
- ss.LeaderInfo.StartTime = now
- }
-
- ss.recvRateQueue.Insert(
- &RequestStats{
- SendingTime: now,
- Size: reqSize,
- },
- )
- ss.RecvAppendRequestCnt++
-}
-
-// SendAppendReq updates the ServerStats in response to an AppendRequest
-// being sent by this server
-func (ss *ServerStats) SendAppendReq(reqSize int) {
- ss.Lock()
- defer ss.Unlock()
-
- ss.becomeLeader()
-
- ss.sendRateQueue.Insert(
- &RequestStats{
- SendingTime: time.Now(),
- Size: reqSize,
- },
- )
-
- ss.SendAppendRequestCnt++
-}
-
-func (ss *ServerStats) BecomeLeader() {
- ss.Lock()
- defer ss.Unlock()
- ss.becomeLeader()
-}
-
-func (ss *ServerStats) becomeLeader() {
- if ss.State != raft.StateLeader {
- ss.State = raft.StateLeader
- ss.LeaderInfo.Name = ss.ID
- ss.LeaderInfo.StartTime = time.Now()
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
deleted file mode 100644
index 2b5f707..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package stats defines a standard interface for etcd cluster statistics.
-package stats
-
-import "github.com/coreos/pkg/capnslog"
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats")
-)
-
-type Stats interface {
- // SelfStats returns the struct representing statistics of this server
- SelfStats() []byte
- // LeaderStats returns the statistics of all followers in the cluster
- // if this server is leader. Otherwise, nil is returned.
- LeaderStats() []byte
- // StoreStats returns statistics of the store backing this EtcdServer
- StoreStats() []byte
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go
deleted file mode 100644
index 55c2dd4..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/storage.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "io"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
- "github.com/coreos/etcd/wal"
- "github.com/coreos/etcd/wal/walpb"
-)
-
-type Storage interface {
- // Save function saves ents and state to the underlying stable storage.
- // Save MUST block until st and ents are on stable storage.
- Save(st raftpb.HardState, ents []raftpb.Entry) error
-	// SaveSnap saves the snapshot to the underlying stable storage.
- SaveSnap(snap raftpb.Snapshot) error
- // Close closes the Storage and performs finalization.
- Close() error
-}
-
-type storage struct {
- *wal.WAL
- *snap.Snapshotter
-}
-
-func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
- return &storage{w, s}
-}
-
-// SaveSnap saves the snapshot to disk and releases the locked
-// wal files since they will not be used.
-func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
- walsnap := walpb.Snapshot{
- Index: snap.Metadata.Index,
- Term: snap.Metadata.Term,
- }
- err := st.WAL.SaveSnapshot(walsnap)
- if err != nil {
- return err
- }
- err = st.Snapshotter.SaveSnap(snap)
- if err != nil {
- return err
- }
- return st.WAL.ReleaseLockTo(snap.Metadata.Index)
-}
-
-func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
- var (
- err error
- wmetadata []byte
- )
-
- repaired := false
- for {
- if w, err = wal.Open(waldir, snap); err != nil {
- plog.Fatalf("open wal error: %v", err)
- }
- if wmetadata, st, ents, err = w.ReadAll(); err != nil {
- w.Close()
- // we can only repair ErrUnexpectedEOF and we never repair twice.
- if repaired || err != io.ErrUnexpectedEOF {
- plog.Fatalf("read wal error (%v) and cannot be repaired", err)
- }
- if !wal.Repair(waldir) {
- plog.Fatalf("WAL error (%v) cannot be repaired", err)
- } else {
- plog.Infof("repaired WAL error (%v)", err)
- repaired = true
- }
- continue
- }
- break
- }
- var metadata pb.Metadata
- pbutil.MustUnmarshal(&metadata, wmetadata)
- id = types.ID(metadata.NodeID)
- cid = types.ID(metadata.ClusterID)
- return w, id, cid, st, ents
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go
deleted file mode 100644
index 79bb6b8..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/util.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "fmt"
- "reflect"
- "strings"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/rafthttp"
- "github.com/golang/protobuf/proto"
-)
-
-// isConnectedToQuorumSince checks whether the local member is connected to the
-// quorum of the cluster since the given time.
-func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
- return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
-}
-
-// isConnectedSince checks whether the local member is connected to the
-// remote member since the given time.
-func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
- t := transport.ActiveSince(remote)
- return !t.IsZero() && t.Before(since)
-}
-
-// isConnectedFullySince checks whether the local member is connected to all
-// members in the cluster since the given time.
-func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
- return numConnectedSince(transport, since, self, members) == len(members)
-}
-
-// numConnectedSince counts how many members are connected to the local member
-// since the given time.
-func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
- connectedNum := 0
- for _, m := range members {
- if m.ID == self || isConnectedSince(transport, since, m.ID) {
- connectedNum++
- }
- }
- return connectedNum
-}
-
-// longestConnected chooses the member with the longest active-since time.
-// It returns false if nothing is active.
-func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
- var longest types.ID
- var oldest time.Time
- for _, id := range membs {
- tm := tp.ActiveSince(id)
- if tm.IsZero() { // inactive
- continue
- }
-
- if oldest.IsZero() { // first longest candidate
- oldest = tm
- longest = id
- }
-
- if tm.Before(oldest) {
- oldest = tm
- longest = id
- }
- }
- if uint64(longest) == 0 {
- return longest, false
- }
- return longest, true
-}
-
-type notifier struct {
- c chan struct{}
- err error
-}
-
-func newNotifier() *notifier {
- return ¬ifier{
- c: make(chan struct{}),
- }
-}
-
-func (nc *notifier) notify(err error) {
- nc.err = err
- close(nc.c)
-}
-
-func warnOfExpensiveRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
- var resp string
- if !isNil(respMsg) {
- resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
- }
- warnOfExpensiveGenericRequest(now, reqStringer, "", resp, err)
-}
-
-func warnOfExpensiveReadOnlyTxnRequest(now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
- reqStringer := pb.NewLoggableTxnRequest(r)
- var resp string
- if !isNil(txnResponse) {
- var resps []string
- for _, r := range txnResponse.Responses {
- switch op := r.Response.(type) {
- case *pb.ResponseOp_ResponseRange:
- resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
- default:
- // only range responses should be in a read only txn request
- }
- }
- resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
- }
- warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
-}
-
-func warnOfExpensiveReadOnlyRangeRequest(now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
- var resp string
- if !isNil(rangeResponse) {
- resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
- }
- warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
-}
-
-func warnOfExpensiveGenericRequest(now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
- // TODO: add metrics
- d := time.Since(now)
- if d > warnApplyDuration {
- var result string
- if err != nil {
- result = fmt.Sprintf("error:%v", err)
- } else {
- result = resp
- }
- plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d)
- slowApplies.Inc()
- }
-}
-
-func isNil(msg proto.Message) bool {
- return msg == nil || reflect.ValueOf(msg).IsNil()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go
deleted file mode 100644
index b458350..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/store"
-)
-
-type RequestV2 pb.Request
-
-type RequestV2Handler interface {
- Post(ctx context.Context, r *RequestV2) (Response, error)
- Put(ctx context.Context, r *RequestV2) (Response, error)
- Delete(ctx context.Context, r *RequestV2) (Response, error)
- QGet(ctx context.Context, r *RequestV2) (Response, error)
- Get(ctx context.Context, r *RequestV2) (Response, error)
- Head(ctx context.Context, r *RequestV2) (Response, error)
-}
-
-type reqV2HandlerEtcdServer struct {
- reqV2HandlerStore
- s *EtcdServer
-}
-
-type reqV2HandlerStore struct {
- store store.Store
- applier ApplierV2
-}
-
-func NewStoreRequestV2Handler(s store.Store, applier ApplierV2) RequestV2Handler {
- return &reqV2HandlerStore{s, applier}
-}
-
-func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Post(r), nil
-}
-
-func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Put(r), nil
-}
-
-func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Delete(r), nil
-}
-
-func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.QGet(r), nil
-}
-
-func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) {
- if r.Wait {
- wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
- return Response{Watcher: wc}, err
- }
- ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
- return Response{Event: ev}, err
-}
-
-func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) {
- ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
- return Response{Event: ev}, err
-}
-
-func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) QGet(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) {
- data, err := ((*pb.Request)(r)).Marshal()
- if err != nil {
- return Response{}, err
- }
- ch := a.s.w.Register(r.ID)
-
- start := time.Now()
- a.s.r.Propose(ctx, data)
- proposalsPending.Inc()
- defer proposalsPending.Dec()
-
- select {
- case x := <-ch:
- resp := x.(Response)
- return resp, resp.Err
- case <-ctx.Done():
- proposalsFailed.Inc()
- a.s.w.Trigger(r.ID, nil) // GC wait
- return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
- case <-a.s.stopping:
- }
- return Response{}, ErrStopped
-}
-
-func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
- r.ID = s.reqIDGen.Next()
- h := &reqV2HandlerEtcdServer{
- reqV2HandlerStore: reqV2HandlerStore{
- store: s.store,
- applier: s.applyV2,
- },
- s: s,
- }
- rp := &r
- resp, err := ((*RequestV2)(rp)).Handle(ctx, h)
- resp.Term, resp.Index = s.Term(), s.Index()
- return resp, err
-}
-
-// Handle interprets r and performs an operation on s.store according to r.Method
-// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
-// Quorum == true, r will be sent through consensus before performing its
-// respective operation. Do will block until an action is performed or there is
-// an error.
-func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
- if r.Method == "GET" && r.Quorum {
- r.Method = "QGET"
- }
- switch r.Method {
- case "POST":
- return v2api.Post(ctx, r)
- case "PUT":
- return v2api.Put(ctx, r)
- case "DELETE":
- return v2api.Delete(ctx, r)
- case "QGET":
- return v2api.QGet(ctx, r)
- case "GET":
- return v2api.Get(ctx, r)
- case "HEAD":
- return v2api.Head(ctx, r)
- }
- return Response{}, ErrUnknownMethod
-}
-
-func (r *RequestV2) String() string {
- rpb := pb.Request(*r)
- return rpb.String()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go
deleted file mode 100644
index f214a19..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go
+++ /dev/null
@@ -1,720 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "time"
-
- "github.com/coreos/etcd/auth"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/lease/leasehttp"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/raft"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const (
-	// In the healthy case, there might be a small gap (tens of entries) between
-	// the applied index and the committed index.
-	// However, if the committed entries are very heavy to apply, the gap might grow.
-	// We should stop accepting new proposals if the gap grows past a certain point.
- maxGapBetweenApplyAndCommitIndex = 5000
-)
-
-type RaftKV interface {
- Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
- Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
- DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
- Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
- Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
-}
-
-type Lessor interface {
-	// LeaseGrant sends a LeaseGrant request to raft and applies it after it is committed.
-	LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
-	// LeaseRevoke sends a LeaseRevoke request to raft and applies it after it is committed.
- LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
-
-	// LeaseRenew renews the lease with the given ID. The renewed TTL is returned,
-	// or an error is returned.
- LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
-
- // LeaseTimeToLive retrieves lease information.
- LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
-
- // LeaseLeases lists all leases.
- LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
-}
-
-type Authenticator interface {
- AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
- AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
- Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
- UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
- UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
- UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
- UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
- UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
- UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
- RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
- RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
- RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
- RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
- RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
- UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
- RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-}
-
-func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- var resp *pb.RangeResponse
- var err error
- defer func(start time.Time) {
- warnOfExpensiveReadOnlyRangeRequest(start, r, resp, err)
- }(time.Now())
-
- if !r.Serializable {
- err = s.linearizableReadNotify(ctx)
- if err != nil {
- return nil, err
- }
- }
- chk := func(ai *auth.AuthInfo) error {
- return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
- }
-
- get := func() { resp, err = s.applyV3Base.Range(nil, r) }
- if serr := s.doSerialize(ctx, chk, get); serr != nil {
- err = serr
- return nil, err
- }
- return resp, err
-}
-
-func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.PutResponse), nil
-}
-
-func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.DeleteRangeResponse), nil
-}
-
-func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if isTxnReadonly(r) {
- if !isTxnSerializable(r) {
- err := s.linearizableReadNotify(ctx)
- if err != nil {
- return nil, err
- }
- }
- var resp *pb.TxnResponse
- var err error
- chk := func(ai *auth.AuthInfo) error {
- return checkTxnAuth(s.authStore, ai, r)
- }
-
- defer func(start time.Time) {
- warnOfExpensiveReadOnlyTxnRequest(start, r, resp, err)
- }(time.Now())
-
- get := func() { resp, err = s.applyV3Base.Txn(r) }
- if serr := s.doSerialize(ctx, chk, get); serr != nil {
- return nil, serr
- }
- return resp, err
- }
-
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.TxnResponse), nil
-}
-
-func isTxnSerializable(r *pb.TxnRequest) bool {
- for _, u := range r.Success {
- if r := u.GetRequestRange(); r == nil || !r.Serializable {
- return false
- }
- }
- for _, u := range r.Failure {
- if r := u.GetRequestRange(); r == nil || !r.Serializable {
- return false
- }
- }
- return true
-}
-
-func isTxnReadonly(r *pb.TxnRequest) bool {
- for _, u := range r.Success {
- if r := u.GetRequestRange(); r == nil {
- return false
- }
- }
- for _, u := range r.Failure {
- if r := u.GetRequestRange(); r == nil {
- return false
- }
- }
- return true
-}
-
-func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
- result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
- if r.Physical && result != nil && result.physc != nil {
- <-result.physc
- // The compaction is done deleting keys; the hash is now settled
- // but the data is not necessarily committed. If there's a crash,
- // the hash may revert to a hash prior to compaction completing
- // if the compaction resumes. Force the finished compaction to
- // commit so it won't resume following a crash.
- s.be.ForceCommit()
- }
- if err != nil {
- return nil, err
- }
- if result.err != nil {
- return nil, result.err
- }
- resp := result.resp.(*pb.CompactionResponse)
- if resp == nil {
- resp = &pb.CompactionResponse{}
- }
- if resp.Header == nil {
- resp.Header = &pb.ResponseHeader{}
- }
- resp.Header.Revision = s.kv.Rev()
- return resp, nil
-}
-
-func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- // no id given? choose one
- for r.ID == int64(lease.NoLease) {
- // only use positive int64 id's
- r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
- }
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.LeaseGrantResponse), nil
-}
-
-func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.LeaseRevokeResponse), nil
-}
-
-func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
- ttl, err := s.lessor.Renew(id)
- if err == nil { // already requested to primary lessor(leader)
- return ttl, nil
- }
- if err != lease.ErrNotPrimary {
- return -1, err
- }
-
- cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
- defer cancel()
-
- // renewals don't go through raft; forward to leader manually
- for cctx.Err() == nil && err != nil {
- leader, lerr := s.waitLeader(cctx)
- if lerr != nil {
- return -1, lerr
- }
- for _, url := range leader.PeerURLs {
- lurl := url + leasehttp.LeasePrefix
- ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
- if err == nil || err == lease.ErrLeaseNotFound {
- return ttl, err
- }
- }
- }
- return -1, ErrTimeout
-}
-
-func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
- if s.Leader() == s.ID() {
- // primary; timetolive directly from leader
- le := s.lessor.Lookup(lease.LeaseID(r.ID))
- if le == nil {
- return nil, lease.ErrLeaseNotFound
- }
- // TODO: fill out ResponseHeader
- resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
- if r.Keys {
- ks := le.Keys()
- kbs := make([][]byte, len(ks))
- for i := range ks {
- kbs[i] = []byte(ks[i])
- }
- resp.Keys = kbs
- }
- return resp, nil
- }
-
- cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
- defer cancel()
-
- // forward to leader
- for cctx.Err() == nil {
- leader, err := s.waitLeader(cctx)
- if err != nil {
- return nil, err
- }
- for _, url := range leader.PeerURLs {
- lurl := url + leasehttp.LeaseInternalPrefix
- resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
- if err == nil {
- return resp.LeaseTimeToLiveResponse, nil
- }
- if err == lease.ErrLeaseNotFound {
- return nil, err
- }
- }
- }
- return nil, ErrTimeout
-}
-
-func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
- ls := s.lessor.Leases()
- lss := make([]*pb.LeaseStatus, len(ls))
- for i := range ls {
- lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
- }
- return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
-}
-
-func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
- leader := s.cluster.Member(s.Leader())
- for leader == nil {
- // wait an election
- dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
- select {
- case <-time.After(dur):
- leader = s.cluster.Member(s.Leader())
- case <-s.stopping:
- return nil, ErrStopped
- case <-ctx.Done():
- return nil, ErrNoLeader
- }
- }
- if leader == nil || len(leader.PeerURLs) == 0 {
- return nil, ErrNoLeader
- }
- return leader, nil
-}
-
-func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AlarmResponse), nil
-}
-
-func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthEnableResponse), nil
-}
-
-func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthDisableResponse), nil
-}
-
-func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
- if err := s.linearizableReadNotify(ctx); err != nil {
- return nil, err
- }
-
- var resp proto.Message
- for {
- checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
- if err != nil {
- if err != auth.ErrAuthNotEnabled {
- plog.Errorf("invalid authentication request to user %s was issued", r.Name)
- }
- return nil, err
- }
-
- st, err := s.AuthStore().GenTokenPrefix()
- if err != nil {
- return nil, err
- }
-
- internalReq := &pb.InternalAuthenticateRequest{
- Name: r.Name,
- Password: r.Password,
- SimpleToken: st,
- }
-
- resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
- if err != nil {
- return nil, err
- }
- if checkedRevision == s.AuthStore().Revision() {
- break
- }
- plog.Infof("revision when password checked is obsolete, retrying")
- }
-
- return resp.(*pb.AuthenticateResponse), nil
-}
-
-func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserAddResponse), nil
-}
-
-func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserDeleteResponse), nil
-}
-
-func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserChangePasswordResponse), nil
-}
-
-func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserGrantRoleResponse), nil
-}
-
-func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserGetResponse), nil
-}
-
-func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserListResponse), nil
-}
-
-func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserRevokeRoleResponse), nil
-}
-
-func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleAddResponse), nil
-}
-
-func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleGrantPermissionResponse), nil
-}
-
-func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleGetResponse), nil
-}
-
-func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleListResponse), nil
-}
-
-func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleRevokePermissionResponse), nil
-}
-
-func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleDeleteResponse), nil
-}
-
-func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
- result, err := s.processInternalRaftRequestOnce(ctx, r)
- if err != nil {
- return nil, err
- }
- if result.err != nil {
- return nil, result.err
- }
- return result.resp, nil
-}
-
-func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
- for {
- resp, err := s.raftRequestOnce(ctx, r)
- if err != auth.ErrAuthOldRevision {
- return resp, err
- }
- }
-}
-
-// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
-func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
- for {
- ai, err := s.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
- if ai == nil {
- // chk expects non-nil AuthInfo; use empty credentials
- ai = &auth.AuthInfo{}
- }
- if err = chk(ai); err != nil {
- if err == auth.ErrAuthOldRevision {
- continue
- }
- return err
- }
- // fetch response for serialized request
- get()
- // empty credentials or current auth info means no need to retry
- if ai.Revision == 0 || ai.Revision == s.authStore.Revision() {
- return nil
- }
-		// to avoid a TOCTOU error, the request must be retried.
- }
-}
-
-func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
- ai := s.getAppliedIndex()
- ci := s.getCommittedIndex()
- if ci > ai+maxGapBetweenApplyAndCommitIndex {
- return nil, ErrTooManyRequests
- }
-
- r.Header = &pb.RequestHeader{
- ID: s.reqIDGen.Next(),
- }
-
- authInfo, err := s.AuthInfoFromCtx(ctx)
- if err != nil {
- return nil, err
- }
- if authInfo != nil {
- r.Header.Username = authInfo.Username
- r.Header.AuthRevision = authInfo.Revision
- }
-
- data, err := r.Marshal()
- if err != nil {
- return nil, err
- }
-
- if len(data) > int(s.Cfg.MaxRequestBytes) {
- return nil, ErrRequestTooLarge
- }
-
- id := r.ID
- if id == 0 {
- id = r.Header.ID
- }
- ch := s.w.Register(id)
-
- cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
- defer cancel()
-
- start := time.Now()
- s.r.Propose(cctx, data)
- proposalsPending.Inc()
- defer proposalsPending.Dec()
-
- select {
- case x := <-ch:
- return x.(*applyResult), nil
- case <-cctx.Done():
- proposalsFailed.Inc()
- s.w.Trigger(id, nil) // GC wait
- return nil, s.parseProposeCtxErr(cctx.Err(), start)
- case <-s.done:
- return nil, ErrStopped
- }
-}
-
-// Watchable returns a watchable interface attached to the etcdserver.
-func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
-
-func (s *EtcdServer) linearizableReadLoop() {
- var rs raft.ReadState
-
- for {
- ctxToSend := make([]byte, 8)
- id1 := s.reqIDGen.Next()
- binary.BigEndian.PutUint64(ctxToSend, id1)
-
- select {
- case <-s.readwaitc:
- case <-s.stopping:
- return
- }
-
- nextnr := newNotifier()
-
- s.readMu.Lock()
- nr := s.readNotifier
- s.readNotifier = nextnr
- s.readMu.Unlock()
-
- cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- if err := s.r.ReadIndex(cctx, ctxToSend); err != nil {
- cancel()
- if err == raft.ErrStopped {
- return
- }
- plog.Errorf("failed to get read index from raft: %v", err)
- readIndexFailed.Inc()
- nr.notify(err)
- continue
- }
- cancel()
-
- var (
- timeout bool
- done bool
- )
- for !timeout && !done {
- select {
- case rs = <-s.r.readStateC:
- done = bytes.Equal(rs.RequestCtx, ctxToSend)
- if !done {
-					// a previous request might have timed out; ignore its response and
-					// continue waiting for the response to the current request.
- id2 := uint64(0)
- if len(rs.RequestCtx) == 8 {
- id2 = binary.BigEndian.Uint64(rs.RequestCtx)
- }
- plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2)
- slowReadIndex.Inc()
- }
-
- case <-time.After(s.Cfg.ReqTimeout()):
- plog.Warningf("timed out waiting for read index response (local node might have slow network)")
- nr.notify(ErrTimeout)
- timeout = true
- slowReadIndex.Inc()
-
- case <-s.stopping:
- return
- }
- }
- if !done {
- continue
- }
-
- if ai := s.getAppliedIndex(); ai < rs.Index {
- select {
- case <-s.applyWait.Wait(rs.Index):
- case <-s.stopping:
- return
- }
- }
-		// unblock all linearizable reads requested at indices before rs.Index
- nr.notify(nil)
- }
-}
-
-func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
- s.readMu.RLock()
- nc := s.readNotifier
- s.readMu.RUnlock()
-
-	// signal the linearizable read loop for the current notifier if it hasn't been signaled already
- select {
- case s.readwaitc <- struct{}{}:
- default:
- }
-
- // wait for read state notification
- select {
- case <-nc.c:
- return nc.err
- case <-ctx.Done():
- return ctx.Err()
- case <-s.done:
- return ErrStopped
- }
-}
-
-func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
- authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
- if authInfo != nil || err != nil {
- return authInfo, err
- }
- if !s.Cfg.ClientCertAuthEnabled {
- return nil, nil
- }
- authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
- return authInfo, nil
-}
diff --git a/vendor/github.com/coreos/etcd/functional.yaml b/vendor/github.com/coreos/etcd/functional.yaml
deleted file mode 100644
index 2029a01..0000000
--- a/vendor/github.com/coreos/etcd/functional.yaml
+++ /dev/null
@@ -1,206 +0,0 @@
-agent-configs:
-- etcd-exec-path: ./bin/etcd
- agent-addr: 127.0.0.1:19027
- failpoint-http-addr: http://127.0.0.1:7381
- base-dir: /tmp/etcd-functional-1
- etcd-log-path: /tmp/etcd-functional-1/etcd.log
- etcd-client-proxy: false
- etcd-peer-proxy: true
- etcd-client-endpoint: 127.0.0.1:1379
- etcd:
- name: s1
- data-dir: /tmp/etcd-functional-1/etcd.data
- wal-dir: /tmp/etcd-functional-1/etcd.data/member/wal
- heartbeat-interval: 100
- election-timeout: 1000
- listen-client-urls: ["https://127.0.0.1:1379"]
- advertise-client-urls: ["https://127.0.0.1:1379"]
- auto-tls: true
- client-cert-auth: false
- cert-file: ""
- key-file: ""
- trusted-ca-file: ""
- listen-peer-urls: ["https://127.0.0.1:1380"]
- initial-advertise-peer-urls: ["https://127.0.0.1:1381"]
- peer-auto-tls: true
- peer-client-cert-auth: false
- peer-cert-file: ""
- peer-key-file: ""
- peer-trusted-ca-file: ""
- initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
- initial-cluster-state: new
- initial-cluster-token: tkn
- snapshot-count: 10000
- quota-backend-bytes: 10740000000 # 10 GiB
- pre-vote: true
- initial-corrupt-check: true
- client-cert-data: ""
- client-cert-path: ""
- client-key-data: ""
- client-key-path: ""
- client-trusted-ca-data: ""
- client-trusted-ca-path: ""
- peer-cert-data: ""
- peer-cert-path: ""
- peer-key-data: ""
- peer-key-path: ""
- peer-trusted-ca-data: ""
- peer-trusted-ca-path: ""
- snapshot-path: /tmp/etcd-functional-1.snapshot.db
-
-- etcd-exec-path: ./bin/etcd
- agent-addr: 127.0.0.1:29027
- failpoint-http-addr: http://127.0.0.1:7382
- base-dir: /tmp/etcd-functional-2
- etcd-log-path: /tmp/etcd-functional-2/etcd.log
- etcd-client-proxy: false
- etcd-peer-proxy: true
- etcd-client-endpoint: 127.0.0.1:2379
- etcd:
- name: s2
- data-dir: /tmp/etcd-functional-2/etcd.data
- wal-dir: /tmp/etcd-functional-2/etcd.data/member/wal
- heartbeat-interval: 100
- election-timeout: 1000
- listen-client-urls: ["https://127.0.0.1:2379"]
- advertise-client-urls: ["https://127.0.0.1:2379"]
- auto-tls: true
- client-cert-auth: false
- cert-file: ""
- key-file: ""
- trusted-ca-file: ""
- listen-peer-urls: ["https://127.0.0.1:2380"]
- initial-advertise-peer-urls: ["https://127.0.0.1:2381"]
- peer-auto-tls: true
- peer-client-cert-auth: false
- peer-cert-file: ""
- peer-key-file: ""
- peer-trusted-ca-file: ""
- initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
- initial-cluster-state: new
- initial-cluster-token: tkn
- snapshot-count: 10000
- quota-backend-bytes: 10740000000 # 10 GiB
- pre-vote: true
- initial-corrupt-check: true
- client-cert-data: ""
- client-cert-path: ""
- client-key-data: ""
- client-key-path: ""
- client-trusted-ca-data: ""
- client-trusted-ca-path: ""
- peer-cert-data: ""
- peer-cert-path: ""
- peer-key-data: ""
- peer-key-path: ""
- peer-trusted-ca-data: ""
- peer-trusted-ca-path: ""
- snapshot-path: /tmp/etcd-functional-2.snapshot.db
-
-- etcd-exec-path: ./bin/etcd
- agent-addr: 127.0.0.1:39027
- failpoint-http-addr: http://127.0.0.1:7383
- base-dir: /tmp/etcd-functional-3
- etcd-log-path: /tmp/etcd-functional-3/etcd.log
- etcd-client-proxy: false
- etcd-peer-proxy: true
- etcd-client-endpoint: 127.0.0.1:3379
- etcd:
- name: s3
- data-dir: /tmp/etcd-functional-3/etcd.data
- wal-dir: /tmp/etcd-functional-3/etcd.data/member/wal
- heartbeat-interval: 100
- election-timeout: 1000
- listen-client-urls: ["https://127.0.0.1:3379"]
- advertise-client-urls: ["https://127.0.0.1:3379"]
- auto-tls: true
- client-cert-auth: false
- cert-file: ""
- key-file: ""
- trusted-ca-file: ""
- listen-peer-urls: ["https://127.0.0.1:3380"]
- initial-advertise-peer-urls: ["https://127.0.0.1:3381"]
- peer-auto-tls: true
- peer-client-cert-auth: false
- peer-cert-file: ""
- peer-key-file: ""
- peer-trusted-ca-file: ""
- initial-cluster: s1=https://127.0.0.1:1381,s2=https://127.0.0.1:2381,s3=https://127.0.0.1:3381
- initial-cluster-state: new
- initial-cluster-token: tkn
- snapshot-count: 10000
- quota-backend-bytes: 10740000000 # 10 GiB
- pre-vote: true
- initial-corrupt-check: true
- client-cert-data: ""
- client-cert-path: ""
- client-key-data: ""
- client-key-path: ""
- client-trusted-ca-data: ""
- client-trusted-ca-path: ""
- peer-cert-data: ""
- peer-cert-path: ""
- peer-key-data: ""
- peer-key-path: ""
- peer-trusted-ca-data: ""
- peer-trusted-ca-path: ""
- snapshot-path: /tmp/etcd-functional-3.snapshot.db
-
-tester-config:
- data-dir: /tmp/etcd-tester-data
- network: tcp
- addr: 127.0.0.1:9028
-
- # slow enough to trigger election
- delay-latency-ms: 5000
- delay-latency-ms-rv: 500
-
- round-limit: 1
- exit-on-failure: true
- enable-pprof: true
-
- case-delay-ms: 7000
- case-shuffle: true
-
- # For full descriptions,
- # https://godoc.org/github.com/coreos/etcd/functional/rpcpb#Case
- cases:
- - SIGTERM_ONE_FOLLOWER
- - SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- - SIGTERM_LEADER
- - SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT
- - SIGTERM_QUORUM
- - SIGTERM_ALL
- - SIGQUIT_AND_REMOVE_ONE_FOLLOWER
- - SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER
- - BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT
- - BLACKHOLE_PEER_PORT_TX_RX_LEADER
- - BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT
- - BLACKHOLE_PEER_PORT_TX_RX_QUORUM
- - DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER
- - DELAY_PEER_PORT_TX_RX_LEADER
- - DELAY_PEER_PORT_TX_RX_QUORUM
-
- failpoint-commands:
- - panic("etcd-tester")
-
- runner-exec-path: ./bin/etcd-runner
- external-exec-path: ""
-
- stressers:
- - KV
- - LEASE
-
- checkers:
- - KV_HASH
- - LEASE_EXPIRE
-
- stress-key-size: 100
- stress-key-size-large: 32769
- stress-key-suffix-range: 250000
- stress-key-suffix-range-txn: 100
- stress-key-txn-ops: 10
-
- stress-clients: 100
- stress-qps: 2000
diff --git a/vendor/github.com/coreos/etcd/glide.lock b/vendor/github.com/coreos/etcd/glide.lock
deleted file mode 100644
index 4554c84..0000000
--- a/vendor/github.com/coreos/etcd/glide.lock
+++ /dev/null
@@ -1,199 +0,0 @@
-hash: f0697416d74e4c0fb9d6471c39c3e005ecdeccc8a864c1b0b65e0087b3242027
-updated: 2018-04-10T23:45:04.40596807-07:00
-imports:
-- name: github.com/beorn7/perks
- version: 3a771d992973f24aa725d07868b467d1ddfceafb
- subpackages:
- - quantile
-- name: github.com/bgentry/speakeasy
- version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd
-- name: github.com/coreos/bbolt
- version: 48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d
-- name: github.com/coreos/go-semver
- version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6
- subpackages:
- - semver
-- name: github.com/coreos/go-systemd
- version: d2196463941895ee908e13531a23a39feb9e1243
- subpackages:
- - daemon
- - journal
- - util
-- name: github.com/coreos/pkg
- version: 3ac0863d7acf3bc44daf49afef8919af12f704ef
- subpackages:
- - capnslog
- - dlopen
-- name: github.com/cpuguy83/go-md2man
- version: 23709d0847197db6021a51fdb193e66e9222d4e7
- subpackages:
- - md2man
-- name: github.com/dgrijalva/jwt-go
- version: d2709f9f1f31ebcda9651b03077758c1f3a0018c
-- name: github.com/dustin/go-humanize
- version: bb3d318650d48840a39aa21a027c6630e198e626
-- name: github.com/ghodss/yaml
- version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
-- name: github.com/gogo/protobuf
- version: 342cbe0a04158f6dcb03ca0079991a51a4248c02
- subpackages:
- - gogoproto
- - proto
- - protoc-gen-gogo/descriptor
-- name: github.com/golang/groupcache
- version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
- subpackages:
- - lru
-- name: github.com/golang/protobuf
- version: 1e59b77b52bf8e4b449a57e6f79f21226d571845
- subpackages:
- - jsonpb
- - proto
- - ptypes
- - ptypes/any
- - ptypes/duration
- - ptypes/struct
- - ptypes/timestamp
-- name: github.com/google/btree
- version: 925471ac9e2131377a91e1595defec898166fe49
-- name: github.com/gorilla/websocket
- version: 4201258b820c74ac8e6922fc9e6b52f71fe46f8d
-- name: github.com/grpc-ecosystem/go-grpc-prometheus
- version: 0dafe0d496ea71181bf2dd039e7e3f44b6bd11a7
-- name: github.com/grpc-ecosystem/grpc-gateway
- version: 8cc3a55af3bcf171a1c23a90c4df9cf591706104
- subpackages:
- - runtime
- - runtime/internal
- - utilities
-- name: github.com/inconshreveable/mousetrap
- version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
-- name: github.com/jonboulle/clockwork
- version: 2eee05ed794112d45db504eb05aa693efd2b8b09
-- name: github.com/kr/pty
- version: 2c10821df3c3cf905230d078702dfbe9404c9b23
-- name: github.com/mattn/go-runewidth
- version: 9e777a8366cce605130a531d2cd6363d07ad7317
- subpackages:
- - runewidth.go
-- name: github.com/matttproud/golang_protobuf_extensions
- version: c12348ce28de40eed0136aa2b644d0ee0650e56c
- subpackages:
- - pbutil
-- name: github.com/olekukonko/tablewriter
- version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
-- name: github.com/prometheus/client_golang
- version: 5cec1d0429b02e4323e042eb04dafdb079ddf568
- subpackages:
- - prometheus
- - prometheus/promhttp
-- name: github.com/prometheus/client_model
- version: 6f3806018612930941127f2a7c6c453ba2c527d2
- subpackages:
- - go
-- name: github.com/prometheus/common
- version: e3fb1a1acd7605367a2b378bc2e2f893c05174b7
- subpackages:
- - expfmt
- - internal/bitbucket.org/ww/goautoneg
- - model
-- name: github.com/prometheus/procfs
- version: a6e9df898b1336106c743392c48ee0b71f5c4efa
- subpackages:
- - xfs
-- name: github.com/russross/blackfriday
- version: 4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c
-- name: github.com/sirupsen/logrus
- version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e
-- name: github.com/soheilhy/cmux
- version: bb79a83465015a27a175925ebd155e660f55e9f1
-- name: github.com/spf13/cobra
- version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
-- name: github.com/spf13/pflag
- version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
-- name: github.com/tmc/grpc-websocket-proxy
- version: 89b8d40f7ca833297db804fcb3be53a76d01c238
- subpackages:
- - wsproxy
-- name: github.com/ugorji/go
- version: bdcc60b419d136a85cdf2e7cbcac34b3f1cd6e57
- subpackages:
- - codec
-- name: github.com/urfave/cli
- version: 1efa31f08b9333f1bd4882d61f9d668a70cd902e
-- name: github.com/xiang90/probing
- version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2
-- name: go.uber.org/atomic
- version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8
-- name: go.uber.org/multierr
- version: 3c4937480c32f4c13a875a1829af76c98ca3d40a
-- name: go.uber.org/zap
- version: 35aad584952c3e7020db7b839f6b102de6271f89
- subpackages:
- - buffer
- - internal/bufferpool
- - internal/color
- - internal/exit
- - zapcore
-- name: golang.org/x/crypto
- version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
- subpackages:
- - bcrypt
- - blowfish
- - ssh/terminal
-- name: golang.org/x/net
- version: 66aacef3dd8a676686c7ae3716979581e8b03c47
- subpackages:
- - context
- - http2
- - http2/hpack
- - idna
- - internal/timeseries
- - lex/httplex
- - trace
-- name: golang.org/x/sys
- version: ebfc5b4631820b793c9010c87fd8fef0f39eb082
- subpackages:
- - unix
- - windows
-- name: golang.org/x/text
- version: b19bf474d317b857955b12035d2c5acb57ce8b01
- subpackages:
- - secure/bidirule
- - transform
- - unicode/bidi
- - unicode/norm
-- name: golang.org/x/time
- version: c06e80d9300e4443158a03817b8a8cb37d230320
- subpackages:
- - rate
-- name: google.golang.org/genproto
- version: 09f6ed296fc66555a25fe4ce95173148778dfa85
- subpackages:
- - googleapis/rpc/status
-- name: google.golang.org/grpc
- version: 5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e
- subpackages:
- - balancer
- - codes
- - connectivity
- - credentials
- - grpclb/grpc_lb_v1/messages
- - grpclog
- - health
- - health/grpc_health_v1
- - internal
- - keepalive
- - metadata
- - naming
- - peer
- - resolver
- - stats
- - status
- - tap
- - transport
-- name: gopkg.in/cheggaaa/pb.v1
- version: 226d21d43a305fac52b3a104ef83e721b15275e0
-- name: gopkg.in/yaml.v2
- version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
-testImports: []
diff --git a/vendor/github.com/coreos/etcd/glide.yaml b/vendor/github.com/coreos/etcd/glide.yaml
deleted file mode 100644
index cc83a85..0000000
--- a/vendor/github.com/coreos/etcd/glide.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-package: github.com/coreos/etcd
-ignore:
-- google.golang.org/appengine
-import:
-- package: github.com/bgentry/speakeasy
- version: v0.1.0
-- package: github.com/coreos/bbolt
- version: v1.3.1-coreos.6
-- package: github.com/coreos/go-semver
- version: v0.2.0
- subpackages:
- - semver
-- package: github.com/coreos/go-systemd
- version: v15
- subpackages:
- - daemon
- - journal
- - util
-- package: go.uber.org/zap
- version: v1.7.1
-- package: github.com/coreos/pkg
- version: v3
- subpackages:
- - capnslog
-- package: github.com/cpuguy83/go-md2man
- version: 23709d0847197db6021a51fdb193e66e9222d4e7
-- package: github.com/dustin/go-humanize
- version: bb3d318650d48840a39aa21a027c6630e198e626
-- package: github.com/ghodss/yaml
- version: v1.0.0
-- package: github.com/gogo/protobuf
- version: v0.5
- subpackages:
- - proto
- - gogoproto
-- package: github.com/gorilla/websocket
- version: 4201258b820c74ac8e6922fc9e6b52f71fe46f8d
-- package: github.com/golang/groupcache
- version: 02826c3e79038b59d737d3b1c0a1d937f71a4433
- subpackages:
- - lru
-- package: github.com/golang/protobuf
- version: 1e59b77b52bf8e4b449a57e6f79f21226d571845
- subpackages:
- - jsonpb
- - proto
-- package: github.com/google/btree
- version: 925471ac9e2131377a91e1595defec898166fe49
-- package: github.com/grpc-ecosystem/grpc-gateway
- version: v1.3.0
- subpackages:
- - runtime
- - runtime/internal
- - utilities
-- package: github.com/jonboulle/clockwork
- version: v0.1.0
-- package: github.com/kr/pty
- version: v1.0.0
-- package: github.com/olekukonko/tablewriter
- version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4
-- package: github.com/mattn/go-runewidth
- version: v0.0.2
- subpackages:
- - runewidth.go
-- package: github.com/prometheus/client_golang
- version: 5cec1d0429b02e4323e042eb04dafdb079ddf568
- subpackages:
- - prometheus
- - prometheus/promhttp
-- package: github.com/prometheus/client_model
- version: 6f3806018612930941127f2a7c6c453ba2c527d2
- subpackages:
- - go
-- package: github.com/prometheus/common
- version: e3fb1a1acd7605367a2b378bc2e2f893c05174b7
-- package: github.com/prometheus/procfs
- version: a6e9df898b1336106c743392c48ee0b71f5c4efa
- subpackages:
- - xfs
-- package: github.com/grpc-ecosystem/go-grpc-prometheus
- version: 0dafe0d496ea71181bf2dd039e7e3f44b6bd11a7
-- package: github.com/spf13/cobra
- version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b
-- package: github.com/spf13/pflag
- version: v1.0.0
-- package: github.com/ugorji/go
- version: bdcc60b419d136a85cdf2e7cbcac34b3f1cd6e57
- subpackages:
- - codec
-- package: github.com/urfave/cli
- version: v1.18.0
-- package: github.com/xiang90/probing
- version: 0.0.1
-- package: golang.org/x/crypto
- version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
- subpackages:
- - bcrypt
- - blowfish
-- package: golang.org/x/net
- version: 66aacef3dd8a676686c7ae3716979581e8b03c47
- subpackages:
- - context
- - http2
- - http2/hpack
- - internal/timeseries
- - trace
-- package: golang.org/x/sys
- version: ebfc5b4631820b793c9010c87fd8fef0f39eb082
-- package: golang.org/x/time
- version: c06e80d9300e4443158a03817b8a8cb37d230320
- subpackages:
- - rate
-- package: google.golang.org/grpc
- version: v1.7.5
- subpackages:
- - codes
- - credentials
- - grpclog
- - internal
- - metadata
- - naming
- - peer
- - transport
- - health
- - health/grpc_health_v1
-- package: gopkg.in/cheggaaa/pb.v1
- version: v1.0.2
-- package: gopkg.in/yaml.v2
- version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b
-- package: github.com/dgrijalva/jwt-go
- version: v3.0.0
-- package: google.golang.org/genproto
- version: 09f6ed296fc66555a25fe4ce95173148778dfa85
- subpackages:
- - googleapis/rpc/status
-- package: golang.org/x/text
- version: b19bf474d317b857955b12035d2c5acb57ce8b01
- subpackages:
- - secure/bidirule
- - transform
- - unicode/bidi
- - unicode/norm
-- package: github.com/russross/blackfriday
- version: 4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c
-- package: github.com/sirupsen/logrus
- version: v1.0.3
-- package: github.com/soheilhy/cmux
- version: v0.1.3
-- package: github.com/tmc/grpc-websocket-proxy
- version: 89b8d40f7ca833297db804fcb3be53a76d01c238
- subpackages:
- - wsproxy
diff --git a/vendor/github.com/coreos/etcd/lease/doc.go b/vendor/github.com/coreos/etcd/lease/doc.go
deleted file mode 100644
index a74eaf7..0000000
--- a/vendor/github.com/coreos/etcd/lease/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package lease provides an interface and implementation for time-limited leases over arbitrary resources.
-package lease
diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go b/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go
deleted file mode 100644
index 8177a37..0000000
--- a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package leasehttp serves lease renewals made through HTTP requests.
-package leasehttp
diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go
deleted file mode 100644
index ac2e788..0000000
--- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasehttp
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/lease/leasepb"
- "github.com/coreos/etcd/pkg/httputil"
-)
-
-var (
- LeasePrefix = "/leases"
- LeaseInternalPrefix = "/leases/internal"
- applyTimeout = time.Second
- ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
-)
-
-// NewHandler returns an http Handler for lease renewals
-func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
- return &leaseHandler{l, waitch}
-}
-
-type leaseHandler struct {
- l lease.Lessor
- waitch func() <-chan struct{}
-}
-
-func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "POST" {
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- http.Error(w, "error reading body", http.StatusBadRequest)
- return
- }
-
- var v []byte
- switch r.URL.Path {
- case LeasePrefix:
- lreq := pb.LeaseKeepAliveRequest{}
- if err := lreq.Unmarshal(b); err != nil {
- http.Error(w, "error unmarshalling request", http.StatusBadRequest)
- return
- }
- select {
- case <-h.waitch():
- case <-time.After(applyTimeout):
- http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
- return
- }
- ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
- if err != nil {
- if err == lease.ErrLeaseNotFound {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
-
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
- // TODO: fill out ResponseHeader
- resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
- v, err = resp.Marshal()
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- case LeaseInternalPrefix:
- lreq := leasepb.LeaseInternalRequest{}
- if err := lreq.Unmarshal(b); err != nil {
- http.Error(w, "error unmarshalling request", http.StatusBadRequest)
- return
- }
- select {
- case <-h.waitch():
- case <-time.After(applyTimeout):
- http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
- return
- }
- l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
- if l == nil {
- http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
- return
- }
- // TODO: fill out ResponseHeader
- resp := &leasepb.LeaseInternalResponse{
- LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
- Header: &pb.ResponseHeader{},
- ID: lreq.LeaseTimeToLiveRequest.ID,
- TTL: int64(l.Remaining().Seconds()),
- GrantedTTL: l.TTL(),
- },
- }
- if lreq.LeaseTimeToLiveRequest.Keys {
- ks := l.Keys()
- kbs := make([][]byte, len(ks))
- for i := range ks {
- kbs[i] = []byte(ks[i])
- }
- resp.LeaseTimeToLiveResponse.Keys = kbs
- }
-
- v, err = resp.Marshal()
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- default:
- http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
- return
- }
-
- w.Header().Set("Content-Type", "application/protobuf")
- w.Write(v)
-}
-
-// RenewHTTP renews a lease at a given primary server.
-// TODO: Batch request in future?
-func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) {
- // will post lreq protobuf to leader
- lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
- if err != nil {
- return -1, err
- }
-
- cc := &http.Client{Transport: rt}
- req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
- if err != nil {
- return -1, err
- }
- req.Header.Set("Content-Type", "application/protobuf")
- req.Cancel = ctx.Done()
-
- resp, err := cc.Do(req)
- if err != nil {
- return -1, err
- }
- b, err := readResponse(resp)
- if err != nil {
- return -1, err
- }
-
- if resp.StatusCode == http.StatusRequestTimeout {
- return -1, ErrLeaseHTTPTimeout
- }
-
- if resp.StatusCode == http.StatusNotFound {
- return -1, lease.ErrLeaseNotFound
- }
-
- if resp.StatusCode != http.StatusOK {
- return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
- }
-
- lresp := &pb.LeaseKeepAliveResponse{}
- if err := lresp.Unmarshal(b); err != nil {
- return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
- }
- if lresp.ID != int64(id) {
- return -1, fmt.Errorf("lease: renew id mismatch")
- }
- return lresp.TTL, nil
-}
-
-// TimeToLiveHTTP retrieves lease information of the given lease ID.
-func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
- // will post lreq protobuf to leader
- lreq, err := (&leasepb.LeaseInternalRequest{
- LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
- ID: int64(id),
- Keys: keys,
- },
- }).Marshal()
- if err != nil {
- return nil, err
- }
-
- req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "application/protobuf")
-
- req = req.WithContext(ctx)
-
- cc := &http.Client{Transport: rt}
- var b []byte
-	// buffer errc channel so that errc doesn't block inside the goroutine
- resp, err := cc.Do(req)
- if err != nil {
- return nil, err
- }
- b, err = readResponse(resp)
- if err != nil {
- return nil, err
- }
- if resp.StatusCode == http.StatusRequestTimeout {
- return nil, ErrLeaseHTTPTimeout
- }
- if resp.StatusCode == http.StatusNotFound {
- return nil, lease.ErrLeaseNotFound
- }
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
- }
-
- lresp := &leasepb.LeaseInternalResponse{}
- if err := lresp.Unmarshal(b); err != nil {
- return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
- }
- if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
- return nil, fmt.Errorf("lease: renew id mismatch")
- }
- return lresp, nil
-}
-
-func readResponse(resp *http.Response) (b []byte, err error) {
- b, err = ioutil.ReadAll(resp.Body)
- httputil.GracefulClose(resp)
- return
-}
diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto b/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
deleted file mode 100644
index be414b9..0000000
--- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
+++ /dev/null
@@ -1,24 +0,0 @@
-syntax = "proto3";
-package leasepb;
-
-import "gogoproto/gogo.proto";
-import "etcd/etcdserver/etcdserverpb/rpc.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-message Lease {
- int64 ID = 1;
- int64 TTL = 2;
-}
-
-message LeaseInternalRequest {
- etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
-}
-
-message LeaseInternalResponse {
- etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
-}
diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go
deleted file mode 100644
index 43f0503..0000000
--- a/vendor/github.com/coreos/etcd/lease/lessor.go
+++ /dev/null
@@ -1,680 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lease
-
-import (
- "encoding/binary"
- "errors"
- "math"
- "sort"
- "sync"
- "time"
-
- "github.com/coreos/etcd/lease/leasepb"
- "github.com/coreos/etcd/mvcc/backend"
-)
-
-// NoLease is a special LeaseID representing the absence of a lease.
-const NoLease = LeaseID(0)
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
-
-var (
- forever = time.Time{}
-
- leaseBucketName = []byte("lease")
-
- // maximum number of leases to revoke per second; configurable for tests
- leaseRevokeRate = 1000
-
- ErrNotPrimary = errors.New("not a primary lessor")
- ErrLeaseNotFound = errors.New("lease not found")
- ErrLeaseExists = errors.New("lease already exists")
- ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
-)
-
-// TxnDelete is a TxnWrite that only permits deletes. Defined here
-// to avoid circular dependency with mvcc.
-type TxnDelete interface {
- DeleteRange(key, end []byte) (n, rev int64)
- End()
-}
-
-// RangeDeleter is a TxnDelete constructor.
-type RangeDeleter func() TxnDelete
-
-type LeaseID int64
-
-// Lessor owns leases. It can grant, revoke, renew and modify leases for lessee.
-type Lessor interface {
- // SetRangeDeleter lets the lessor create TxnDeletes to the store.
- // Lessor deletes the items in the revoked or expired lease by creating
- // new TxnDeletes.
- SetRangeDeleter(rd RangeDeleter)
-
- // Grant grants a lease that expires at least after TTL seconds.
- Grant(id LeaseID, ttl int64) (*Lease, error)
- // Revoke revokes a lease with given ID. The item attached to the
- // given lease will be removed. If the ID does not exist, an error
- // will be returned.
- Revoke(id LeaseID) error
-
- // Attach attaches given leaseItem to the lease with given LeaseID.
- // If the lease does not exist, an error will be returned.
- Attach(id LeaseID, items []LeaseItem) error
-
- // GetLease returns LeaseID for given item.
- // If no lease found, NoLease value will be returned.
- GetLease(item LeaseItem) LeaseID
-
- // Detach detaches given leaseItem from the lease with given LeaseID.
- // If the lease does not exist, an error will be returned.
- Detach(id LeaseID, items []LeaseItem) error
-
-	// Promote promotes the lessor to be the primary lessor. The primary lessor
-	// manages the expiration and renewal of leases.
-	// A newly promoted lessor renews the TTL of all leases to extend + previous TTL.
- Promote(extend time.Duration)
-
- // Demote demotes the lessor from being the primary lessor.
- Demote()
-
- // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist,
- // an error will be returned.
- Renew(id LeaseID) (int64, error)
-
- // Lookup gives the lease at a given lease id, if any
- Lookup(id LeaseID) *Lease
-
- // Leases lists all leases.
- Leases() []*Lease
-
- // ExpiredLeasesC returns a chan that is used to receive expired leases.
- ExpiredLeasesC() <-chan []*Lease
-
- // Recover recovers the lessor state from the given backend and RangeDeleter.
- Recover(b backend.Backend, rd RangeDeleter)
-
- // Stop stops the lessor for managing leases. The behavior of calling Stop multiple
- // times is undefined.
- Stop()
-}
-
-// lessor implements Lessor interface.
-// TODO: use clockwork for testability.
-type lessor struct {
- mu sync.Mutex
-
- // demotec is set when the lessor is the primary.
- // demotec will be closed if the lessor is demoted.
- demotec chan struct{}
-
- // TODO: probably this should be a heap with a secondary
- // id index.
- // Now it is O(N) to loop over the leases to find expired ones.
- // We want to make Grant, Revoke, and findExpiredLeases all O(logN) and
- // Renew O(1).
- // findExpiredLeases and Renew should be the most frequent operations.
- leaseMap map[LeaseID]*Lease
-
- itemMap map[LeaseItem]LeaseID
-
- // When a lease expires, the lessor will delete the
- // leased range (or key) by the RangeDeleter.
- rd RangeDeleter
-
- // backend to persist leases. We only persist lease ID and expiry for now.
- // The leased items can be recovered by iterating all the keys in kv.
- b backend.Backend
-
- // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any
- // requests for shorter TTLs are extended to the minimum TTL.
- minLeaseTTL int64
-
- expiredC chan []*Lease
- // stopC is a channel whose closure indicates that the lessor should be stopped.
- stopC chan struct{}
- // doneC is a channel whose closure indicates that the lessor is stopped.
- doneC chan struct{}
-}
-
-func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor {
- return newLessor(b, minLeaseTTL)
-}
-
-func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
- l := &lessor{
- leaseMap: make(map[LeaseID]*Lease),
- itemMap: make(map[LeaseItem]LeaseID),
- b: b,
- minLeaseTTL: minLeaseTTL,
- // expiredC is a small buffered chan to avoid unnecessary blocking.
- expiredC: make(chan []*Lease, 16),
- stopC: make(chan struct{}),
- doneC: make(chan struct{}),
- }
- l.initAndRecover()
-
- go l.runLoop()
-
- return l
-}
-
-// isPrimary indicates whether this lessor is the primary lessor. The primary
-// lessor manages lease expiration and renewal.
-//
-// In etcd, the raft leader is the primary. Thus there might be two primaries
-// at the same time (raft allows concurrent leaders, but with different terms)
-// for at most a leader election timeout.
-// The old primary leader cannot affect the correctness since its proposal has a
-// smaller term and will not be committed.
-//
-// TODO: raft followers do not forward lease management proposals. There might be a
-// very small window (normally within a second, depending on Go scheduling) in which
-// a raft follower is the primary between the raft leader demotion and the lessor demotion.
-// Usually this should not be a problem. Leases should not be that sensitive to timing.
-func (le *lessor) isPrimary() bool {
- return le.demotec != nil
-}
-
-func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- le.rd = rd
-}
-
-func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
- if id == NoLease {
- return nil, ErrLeaseNotFound
- }
-
- if ttl > MaxLeaseTTL {
- return nil, ErrLeaseTTLTooLarge
- }
-
-	// TODO: when the lessor is under high load, it should give out leases
-	// with longer TTLs to reduce renewal load.
- l := &Lease{
- ID: id,
- ttl: ttl,
- itemSet: make(map[LeaseItem]struct{}),
- revokec: make(chan struct{}),
- }
-
- le.mu.Lock()
- defer le.mu.Unlock()
-
- if _, ok := le.leaseMap[id]; ok {
- return nil, ErrLeaseExists
- }
-
- if l.ttl < le.minLeaseTTL {
- l.ttl = le.minLeaseTTL
- }
-
- if le.isPrimary() {
- l.refresh(0)
- } else {
- l.forever()
- }
-
- le.leaseMap[id] = l
- l.persistTo(le.b)
-
- return l, nil
-}
-
-func (le *lessor) Revoke(id LeaseID) error {
- le.mu.Lock()
-
- l := le.leaseMap[id]
- if l == nil {
- le.mu.Unlock()
- return ErrLeaseNotFound
- }
- defer close(l.revokec)
- // unlock before doing external work
- le.mu.Unlock()
-
- if le.rd == nil {
- return nil
- }
-
- txn := le.rd()
-
-	// sort keys so deletes are in the same order among all members,
-	// otherwise the backend hashes will be different
- keys := l.Keys()
- sort.StringSlice(keys).Sort()
- for _, key := range keys {
- txn.DeleteRange([]byte(key), nil)
- }
-
- le.mu.Lock()
- defer le.mu.Unlock()
- delete(le.leaseMap, l.ID)
-	// lease deletion needs to be in the same backend transaction as the
-	// kv deletion; otherwise we might end up not executing the revoke or not
-	// deleting the keys if etcdserver fails in between.
- le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID)))
-
- txn.End()
- return nil
-}
-
-// Renew renews an existing lease. If the given lease does not exist or
-// has expired, an error will be returned.
-func (le *lessor) Renew(id LeaseID) (int64, error) {
- le.mu.Lock()
-
- unlock := func() { le.mu.Unlock() }
- defer func() { unlock() }()
-
- if !le.isPrimary() {
- // forward renew request to primary instead of returning error.
- return -1, ErrNotPrimary
- }
-
- demotec := le.demotec
-
- l := le.leaseMap[id]
- if l == nil {
- return -1, ErrLeaseNotFound
- }
-
- if l.expired() {
- le.mu.Unlock()
- unlock = func() {}
- select {
-		// An expired lease might be pending revocation or going through
-		// quorum to be revoked. To be accurate, the renew request must wait for the
-		// deletion to complete.
- case <-l.revokec:
- return -1, ErrLeaseNotFound
- // The expired lease might fail to be revoked if the primary changes.
- // The caller will retry on ErrNotPrimary.
- case <-demotec:
- return -1, ErrNotPrimary
- case <-le.stopC:
- return -1, ErrNotPrimary
- }
- }
-
- l.refresh(0)
- return l.ttl, nil
-}
-
-func (le *lessor) Lookup(id LeaseID) *Lease {
- le.mu.Lock()
- defer le.mu.Unlock()
- return le.leaseMap[id]
-}
-
-func (le *lessor) unsafeLeases() []*Lease {
- leases := make([]*Lease, 0, len(le.leaseMap))
- for _, l := range le.leaseMap {
- leases = append(leases, l)
- }
- sort.Sort(leasesByExpiry(leases))
- return leases
-}
-
-func (le *lessor) Leases() []*Lease {
- le.mu.Lock()
- ls := le.unsafeLeases()
- le.mu.Unlock()
- return ls
-}
-
-func (le *lessor) Promote(extend time.Duration) {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- le.demotec = make(chan struct{})
-
- // refresh the expiries of all leases.
- for _, l := range le.leaseMap {
- l.refresh(extend)
- }
-
- if len(le.leaseMap) < leaseRevokeRate {
- // no possibility of lease pile-up
- return
- }
-
- // adjust expiries in case of overlap
- leases := le.unsafeLeases()
-
- baseWindow := leases[0].Remaining()
- nextWindow := baseWindow + time.Second
- expires := 0
- // have fewer expires than the total revoke rate so piled up leases
- // don't consume the entire revoke limit
- targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
- for _, l := range leases {
- remaining := l.Remaining()
- if remaining > nextWindow {
- baseWindow = remaining
- nextWindow = baseWindow + time.Second
- expires = 1
- continue
- }
- expires++
- if expires <= targetExpiresPerSecond {
- continue
- }
- rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
- // If leases are extended by n seconds, leases n seconds ahead of the
- // base window should be extended by only one second.
- rateDelay -= float64(remaining - baseWindow)
- delay := time.Duration(rateDelay)
- nextWindow = baseWindow + delay
- l.refresh(delay + extend)
- }
-}
-
-type leasesByExpiry []*Lease
-
-func (le leasesByExpiry) Len() int { return len(le) }
-func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
-func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
-
-func (le *lessor) Demote() {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- // set the expiries of all leases to forever
- for _, l := range le.leaseMap {
- l.forever()
- }
-
- if le.demotec != nil {
- close(le.demotec)
- le.demotec = nil
- }
-}
-
-// Attach attaches items to the lease with given ID. When the lease
-// expires, the attached items will be automatically removed.
-// If the given lease does not exist, an error will be returned.
-func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- l := le.leaseMap[id]
- if l == nil {
- return ErrLeaseNotFound
- }
-
- l.mu.Lock()
- for _, it := range items {
- l.itemSet[it] = struct{}{}
- le.itemMap[it] = id
- }
- l.mu.Unlock()
- return nil
-}
-
-func (le *lessor) GetLease(item LeaseItem) LeaseID {
- le.mu.Lock()
- id := le.itemMap[item]
- le.mu.Unlock()
- return id
-}
-
-// Detach detaches items from the lease with given ID.
-// If the given lease does not exist, an error will be returned.
-func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- l := le.leaseMap[id]
- if l == nil {
- return ErrLeaseNotFound
- }
-
- l.mu.Lock()
- for _, it := range items {
- delete(l.itemSet, it)
- delete(le.itemMap, it)
- }
- l.mu.Unlock()
- return nil
-}
-
-func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- le.b = b
- le.rd = rd
- le.leaseMap = make(map[LeaseID]*Lease)
- le.itemMap = make(map[LeaseItem]LeaseID)
- le.initAndRecover()
-}
-
-func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
- return le.expiredC
-}
-
-func (le *lessor) Stop() {
- close(le.stopC)
- <-le.doneC
-}
-
-func (le *lessor) runLoop() {
- defer close(le.doneC)
-
- for {
- var ls []*Lease
-
- // rate limit
- revokeLimit := leaseRevokeRate / 2
-
- le.mu.Lock()
- if le.isPrimary() {
- ls = le.findExpiredLeases(revokeLimit)
- }
- le.mu.Unlock()
-
- if len(ls) != 0 {
- select {
- case <-le.stopC:
- return
- case le.expiredC <- ls:
- default:
- // the receiver of expiredC is probably busy handling
- // other stuff
- // let's try this next time after 500ms
- }
- }
-
- select {
- case <-time.After(500 * time.Millisecond):
- case <-le.stopC:
- return
- }
- }
-}
-
-// findExpiredLeases loops over the leases in the leaseMap until reaching the expired
-// limit and returns the expired leases that need to be revoked.
-func (le *lessor) findExpiredLeases(limit int) []*Lease {
- leases := make([]*Lease, 0, 16)
-
- for _, l := range le.leaseMap {
-		// TODO: probably should change to <= 100-500 millisecond to
-		// make up for commit latency.
- if l.expired() {
- leases = append(leases, l)
-
- // reach expired limit
- if len(leases) == limit {
- break
- }
- }
- }
-
- return leases
-}
-
-func (le *lessor) initAndRecover() {
- tx := le.b.BatchTx()
- tx.Lock()
-
- tx.UnsafeCreateBucket(leaseBucketName)
- _, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
- // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue.
- for i := range vs {
- var lpb leasepb.Lease
- err := lpb.Unmarshal(vs[i])
- if err != nil {
- tx.Unlock()
- panic("failed to unmarshal lease proto item")
- }
- ID := LeaseID(lpb.ID)
- if lpb.TTL < le.minLeaseTTL {
- lpb.TTL = le.minLeaseTTL
- }
- le.leaseMap[ID] = &Lease{
- ID: ID,
- ttl: lpb.TTL,
-			// itemSet will be filled in when recovering key-value pairs
-			// set the expiry to forever; refresh when promoted
- itemSet: make(map[LeaseItem]struct{}),
- expiry: forever,
- revokec: make(chan struct{}),
- }
- }
- tx.Unlock()
-
- le.b.ForceCommit()
-}
-
-type Lease struct {
- ID LeaseID
- ttl int64 // time to live in seconds
- // expiryMu protects concurrent accesses to expiry
- expiryMu sync.RWMutex
- // expiry is time when lease should expire. no expiration when expiry.IsZero() is true
- expiry time.Time
-
- // mu protects concurrent accesses to itemSet
- mu sync.RWMutex
- itemSet map[LeaseItem]struct{}
- revokec chan struct{}
-}
-
-func (l *Lease) expired() bool {
- return l.Remaining() <= 0
-}
-
-func (l *Lease) persistTo(b backend.Backend) {
- key := int64ToBytes(int64(l.ID))
-
- lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)}
- val, err := lpb.Marshal()
- if err != nil {
- panic("failed to marshal lease proto item")
- }
-
- b.BatchTx().Lock()
- b.BatchTx().UnsafePut(leaseBucketName, key, val)
- b.BatchTx().Unlock()
-}
-
-// TTL returns the TTL of the Lease.
-func (l *Lease) TTL() int64 {
- return l.ttl
-}
-
-// refresh refreshes the expiry of the lease.
-func (l *Lease) refresh(extend time.Duration) {
- newExpiry := time.Now().Add(extend + time.Duration(l.ttl)*time.Second)
- l.expiryMu.Lock()
- defer l.expiryMu.Unlock()
- l.expiry = newExpiry
-}
-
-// forever sets the expiry of lease to be forever.
-func (l *Lease) forever() {
- l.expiryMu.Lock()
- defer l.expiryMu.Unlock()
- l.expiry = forever
-}
-
-// Keys returns all the keys attached to the lease.
-func (l *Lease) Keys() []string {
- l.mu.RLock()
- keys := make([]string, 0, len(l.itemSet))
- for k := range l.itemSet {
- keys = append(keys, k.Key)
- }
- l.mu.RUnlock()
- return keys
-}
-
-// Remaining returns the remaining time of the lease.
-func (l *Lease) Remaining() time.Duration {
- l.expiryMu.RLock()
- defer l.expiryMu.RUnlock()
- if l.expiry.IsZero() {
- return time.Duration(math.MaxInt64)
- }
- return time.Until(l.expiry)
-}
-
-type LeaseItem struct {
- Key string
-}
-
-func int64ToBytes(n int64) []byte {
- bytes := make([]byte, 8)
- binary.BigEndian.PutUint64(bytes, uint64(n))
- return bytes
-}
-
-// FakeLessor is a fake implementation of Lessor interface.
-// Used for testing only.
-type FakeLessor struct{}
-
-func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
-
-func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
-
-func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
-
-func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
-
-func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
-func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
-
-func (fl *FakeLessor) Promote(extend time.Duration) {}
-
-func (fl *FakeLessor) Demote() {}
-
-func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
-
-func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }
-
-func (fl *FakeLessor) Leases() []*Lease { return nil }
-
-func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }
-
-func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}
-
-func (fl *FakeLessor) Stop() {}
diff --git a/vendor/github.com/coreos/etcd/main.go b/vendor/github.com/coreos/etcd/main.go
deleted file mode 100644
index 0b73573..0000000
--- a/vendor/github.com/coreos/etcd/main.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package main is a simple wrapper of the real etcd entrypoint package
-// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still
-// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and
-// builds a binary in $GOBIN/etcd
-//
-// This package should NOT be extended or modified in any way; to modify the
-// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package.
-//
-package main
-
-import "github.com/coreos/etcd/etcdmain"
-
-func main() {
- etcdmain.Main()
-}
diff --git a/vendor/github.com/coreos/etcd/meeting.ics b/vendor/github.com/coreos/etcd/meeting.ics
deleted file mode 100644
index 0157f9a..0000000
--- a/vendor/github.com/coreos/etcd/meeting.ics
+++ /dev/null
@@ -1,49 +0,0 @@
-BEGIN:VCALENDAR
-PRODID:-//Google Inc//Google Calendar 70.9054//EN
-VERSION:2.0
-CALSCALE:GREGORIAN
-METHOD:REPLY
-BEGIN:VTIMEZONE
-TZID:America/Los_Angeles
-X-LIC-LOCATION:America/Los_Angeles
-BEGIN:DAYLIGHT
-TZOFFSETFROM:-0800
-TZOFFSETTO:-0700
-TZNAME:PDT
-DTSTART:19700308T020000
-RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
-END:DAYLIGHT
-BEGIN:STANDARD
-TZOFFSETFROM:-0700
-TZOFFSETTO:-0800
-TZNAME:PST
-DTSTART:19701101T020000
-RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
-END:STANDARD
-END:VTIMEZONE
-BEGIN:VEVENT
-DTSTART;TZID=America/Los_Angeles:20180116T110000
-DTEND;TZID=America/Los_Angeles:20180116T115000
-RRULE:FREQ=WEEKLY;INTERVAL=2;BYDAY=TU
-DTSTAMP:20171213T131221Z
-ORGANIZER;CN=Gyuho Lee:mailto:gyu_ho.lee@coreos.com
-UID:11ivec3kg2egsng3vrl8t5alar@google.com
-CREATED:20171212T194217Z
-DESCRIPTION:<br>Please add your discussion items to the meeting notes.<br><
- br>Meeting notes<br><a href="https://docs.google.com/document/d/1DbVXOHvd9s
- cFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit?usp=sharing">https://docs.google.com
- /document/d/1DbVXOHvd9scFsSmL2oNg4YGOHJdXqtx583DmeVWrB_M/edit?usp=sharing</
- a><br><br>Zoom meeting<br><a href="https://www.google.com/url?q=https%3A%2F
- %2Fcoreos.zoom.us%2Fj%2F854793406&\;sa=D&\;ust=1509474820520000&\;
- usg=AFQjCNFIOIfx1O_dgC-1N5YLyLOMa7D3Dg" target="_blank">https://coreos.zoom
- .us/j/854793406</a><br><br>Slack<br><a href="https://www.google.com/url?q=h
- ttps%3A%2F%2Fkubernetes.slack.com&\;sa=D&\;ust=1513114941738000&\;
- usg=AFQjCNHbdDPJcyZ2tVATRqTQDuZDFzGoRQ" target="_blank">https://kubernetes.
- slack.com</a> <i>#etcd</i><br><br><i><br></i>
-LAST-MODIFIED:20171213T131220Z
-SEQUENCE:0
-STATUS:CONFIRMED
-SUMMARY:etcd meeting
-TRANSP:OPAQUE
-END:VEVENT
-END:VCALENDAR
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go
deleted file mode 100644
index f7d9e60..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go
+++ /dev/null
@@ -1,464 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "fmt"
- "hash/crc32"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "time"
-
- bolt "github.com/coreos/bbolt"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- defaultBatchLimit = 10000
- defaultBatchInterval = 100 * time.Millisecond
-
- defragLimit = 10000
-
- // initialMmapSize is the initial size of the mmapped region. Setting this larger than
- // the potential max db size can prevent writer from blocking reader.
- // This only works for linux.
- initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")
-
- // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
- minSnapshotWarningTimeout = time.Duration(30 * time.Second)
-)
-
-type Backend interface {
- ReadTx() ReadTx
- BatchTx() BatchTx
-
- Snapshot() Snapshot
- Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
- // Size returns the current size of the backend.
- Size() int64
- // SizeInUse returns the current size of the backend logically in use.
- // Since the backend can manage free space in a non-byte unit such as
- // number of pages, the returned value can be not exactly accurate in bytes.
- SizeInUse() int64
- Defrag() error
- ForceCommit()
- Close() error
-}
-
-type Snapshot interface {
- // Size gets the size of the snapshot.
- Size() int64
- // WriteTo writes the snapshot into the given writer.
- WriteTo(w io.Writer) (n int64, err error)
- // Close closes the snapshot.
- Close() error
-}
-
-type backend struct {
- // size and commits are used with atomic operations so they must be
- // 64-bit aligned, otherwise 32-bit tests will crash
-
- // size is the number of bytes in the backend
- size int64
-
- // sizeInUse is the number of bytes actually used in the backend
- sizeInUse int64
-
- // commits counts number of commits since start
- commits int64
-
- mu sync.RWMutex
- db *bolt.DB
-
- batchInterval time.Duration
- batchLimit int
- batchTx *batchTxBuffered
-
- readTx *readTx
-
- stopc chan struct{}
- donec chan struct{}
-}
-
-type BackendConfig struct {
- // Path is the file path to the backend file.
- Path string
- // BatchInterval is the maximum time before flushing the BatchTx.
- BatchInterval time.Duration
- // BatchLimit is the maximum puts before flushing the BatchTx.
- BatchLimit int
- // MmapSize is the number of bytes to mmap for the backend.
- MmapSize uint64
-}
-
-func DefaultBackendConfig() BackendConfig {
- return BackendConfig{
- BatchInterval: defaultBatchInterval,
- BatchLimit: defaultBatchLimit,
- MmapSize: initialMmapSize,
- }
-}
-
-func New(bcfg BackendConfig) Backend {
- return newBackend(bcfg)
-}
-
-func NewDefaultBackend(path string) Backend {
- bcfg := DefaultBackendConfig()
- bcfg.Path = path
- return newBackend(bcfg)
-}
-
-func newBackend(bcfg BackendConfig) *backend {
- bopts := &bolt.Options{}
- if boltOpenOptions != nil {
- *bopts = *boltOpenOptions
- }
- bopts.InitialMmapSize = bcfg.mmapSize()
-
- db, err := bolt.Open(bcfg.Path, 0600, bopts)
- if err != nil {
- plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
- }
-
- // In future, may want to make buffering optional for low-concurrency systems
- // or dynamically swap between buffered/non-buffered depending on workload.
- b := &backend{
- db: db,
-
- batchInterval: bcfg.BatchInterval,
- batchLimit: bcfg.BatchLimit,
-
- readTx: &readTx{
- buf: txReadBuffer{
- txBuffer: txBuffer{make(map[string]*bucketBuffer)},
- },
- buckets: make(map[string]*bolt.Bucket),
- },
-
- stopc: make(chan struct{}),
- donec: make(chan struct{}),
- }
- b.batchTx = newBatchTxBuffered(b)
- go b.run()
- return b
-}
-
-// BatchTx returns the current batch tx in coalescer. The tx can be used for read and
-// write operations. The write result can be retrieved within the same tx immediately.
-// The write result is isolated with other txs until the current one get committed.
-func (b *backend) BatchTx() BatchTx {
- return b.batchTx
-}
-
-func (b *backend) ReadTx() ReadTx { return b.readTx }
-
-// ForceCommit forces the current batching tx to commit.
-func (b *backend) ForceCommit() {
- b.batchTx.Commit()
-}
-
-func (b *backend) Snapshot() Snapshot {
- b.batchTx.Commit()
-
- b.mu.RLock()
- defer b.mu.RUnlock()
- tx, err := b.db.Begin(false)
- if err != nil {
- plog.Fatalf("cannot begin tx (%s)", err)
- }
-
- stopc, donec := make(chan struct{}), make(chan struct{})
- dbBytes := tx.Size()
- go func() {
- defer close(donec)
- // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection
- // assuming a min tcp throughput of 100MB/s.
- var sendRateBytes int64 = 100 * 1024 * 1014
- warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
- if warningTimeout < minSnapshotWarningTimeout {
- warningTimeout = minSnapshotWarningTimeout
- }
- start := time.Now()
- ticker := time.NewTicker(warningTimeout)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start)
- case <-stopc:
- snapshotDurations.Observe(time.Since(start).Seconds())
- return
- }
- }
- }()
-
- return &snapshot{tx, stopc, donec}
-}
-
-type IgnoreKey struct {
- Bucket string
- Key string
-}
-
-func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
- h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
- b.mu.RLock()
- defer b.mu.RUnlock()
- err := b.db.View(func(tx *bolt.Tx) error {
- c := tx.Cursor()
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("cannot get hash of bucket %s", string(next))
- }
- h.Write(next)
- b.ForEach(func(k, v []byte) error {
- bk := IgnoreKey{Bucket: string(next), Key: string(k)}
- if _, ok := ignores[bk]; !ok {
- h.Write(k)
- h.Write(v)
- }
- return nil
- })
- }
- return nil
- })
-
- if err != nil {
- return 0, err
- }
-
- return h.Sum32(), nil
-}
-
-func (b *backend) Size() int64 {
- return atomic.LoadInt64(&b.size)
-}
-
-func (b *backend) SizeInUse() int64 {
- return atomic.LoadInt64(&b.sizeInUse)
-}
-
-func (b *backend) run() {
- defer close(b.donec)
- t := time.NewTimer(b.batchInterval)
- defer t.Stop()
- for {
- select {
- case <-t.C:
- case <-b.stopc:
- b.batchTx.CommitAndStop()
- return
- }
- b.batchTx.Commit()
- t.Reset(b.batchInterval)
- }
-}
-
-func (b *backend) Close() error {
- close(b.stopc)
- <-b.donec
- return b.db.Close()
-}
-
-// Commits returns total number of commits since start
-func (b *backend) Commits() int64 {
- return atomic.LoadInt64(&b.commits)
-}
-
-func (b *backend) Defrag() error {
- return b.defrag()
-}
-
-func (b *backend) defrag() error {
- now := time.Now()
-
- // TODO: make this non-blocking?
- // lock batchTx to ensure nobody is using previous tx, and then
- // close previous ongoing tx.
- b.batchTx.Lock()
- defer b.batchTx.Unlock()
-
- // lock database after lock tx to avoid deadlock.
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // block concurrent read requests while resetting tx
- b.readTx.mu.Lock()
- defer b.readTx.mu.Unlock()
-
- b.batchTx.unsafeCommit(true)
- b.batchTx.tx = nil
-
- tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions)
- if err != nil {
- return err
- }
-
- err = defragdb(b.db, tmpdb, defragLimit)
-
- if err != nil {
- tmpdb.Close()
- os.RemoveAll(tmpdb.Path())
- return err
- }
-
- dbp := b.db.Path()
- tdbp := tmpdb.Path()
-
- err = b.db.Close()
- if err != nil {
- plog.Fatalf("cannot close database (%s)", err)
- }
- err = tmpdb.Close()
- if err != nil {
- plog.Fatalf("cannot close database (%s)", err)
- }
- err = os.Rename(tdbp, dbp)
- if err != nil {
- plog.Fatalf("cannot rename database (%s)", err)
- }
-
- b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
- if err != nil {
- plog.Panicf("cannot open database at %s (%v)", dbp, err)
- }
- b.batchTx.tx, err = b.db.Begin(true)
- if err != nil {
- plog.Fatalf("cannot begin tx (%s)", err)
- }
-
- b.readTx.reset()
- b.readTx.tx = b.unsafeBegin(false)
-
- size := b.readTx.tx.Size()
- db := b.db
- atomic.StoreInt64(&b.size, size)
- atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
-
- took := time.Since(now)
- defragDurations.Observe(took.Seconds())
-
- return nil
-}
-
-func defragdb(odb, tmpdb *bolt.DB, limit int) error {
- // open a tx on tmpdb for writes
- tmptx, err := tmpdb.Begin(true)
- if err != nil {
- return err
- }
-
- // open a tx on old db for read
- tx, err := odb.Begin(false)
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- c := tx.Cursor()
-
- count := 0
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
- }
-
- tmpb, berr := tmptx.CreateBucketIfNotExists(next)
- if berr != nil {
- return berr
- }
- tmpb.FillPercent = 0.9 // for seq write in for each
-
- b.ForEach(func(k, v []byte) error {
- count++
- if count > limit {
- err = tmptx.Commit()
- if err != nil {
- return err
- }
- tmptx, err = tmpdb.Begin(true)
- if err != nil {
- return err
- }
- tmpb = tmptx.Bucket(next)
- tmpb.FillPercent = 0.9 // for seq write in for each
-
- count = 0
- }
- return tmpb.Put(k, v)
- })
- }
-
- return tmptx.Commit()
-}
-
-func (b *backend) begin(write bool) *bolt.Tx {
- b.mu.RLock()
- tx := b.unsafeBegin(write)
- b.mu.RUnlock()
-
- size := tx.Size()
- db := tx.DB()
- atomic.StoreInt64(&b.size, size)
- atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
-
- return tx
-}
-
-func (b *backend) unsafeBegin(write bool) *bolt.Tx {
- tx, err := b.db.Begin(write)
- if err != nil {
- plog.Fatalf("cannot begin tx (%s)", err)
- }
- return tx
-}
-
-// NewTmpBackend creates a backend implementation for testing.
-func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
- dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
- if err != nil {
- plog.Fatal(err)
- }
- tmpPath := filepath.Join(dir, "database")
- bcfg := DefaultBackendConfig()
- bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
- return newBackend(bcfg), tmpPath
-}
-
-func NewDefaultTmpBackend() (*backend, string) {
- return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
-}
-
-type snapshot struct {
- *bolt.Tx
- stopc chan struct{}
- donec chan struct{}
-}
-
-func (s *snapshot) Close() error {
- close(s.stopc)
- <-s.donec
- return s.Tx.Rollback()
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
deleted file mode 100644
index aed6893..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- bolt "github.com/coreos/bbolt"
-)
-
-type BatchTx interface {
- ReadTx
- UnsafeCreateBucket(name []byte)
- UnsafePut(bucketName []byte, key []byte, value []byte)
- UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
- UnsafeDelete(bucketName []byte, key []byte)
- // Commit commits a previous tx and begins a new writable one.
- Commit()
- // CommitAndStop commits the previous tx and does not create a new one.
- CommitAndStop()
-}
-
-type batchTx struct {
- sync.Mutex
- tx *bolt.Tx
- backend *backend
-
- pending int
-}
-
-func (t *batchTx) UnsafeCreateBucket(name []byte) {
- _, err := t.tx.CreateBucket(name)
- if err != nil && err != bolt.ErrBucketExists {
- plog.Fatalf("cannot create bucket %s (%v)", name, err)
- }
- t.pending++
-}
-
-// UnsafePut must be called holding the lock on the tx.
-func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
- t.unsafePut(bucketName, key, value, false)
-}
-
-// UnsafeSeqPut must be called holding the lock on the tx.
-func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- t.unsafePut(bucketName, key, value, true)
-}
-
-func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- plog.Fatalf("bucket %s does not exist", bucketName)
- }
- if seq {
- // it is useful to increase fill percent when the workloads are mostly append-only.
- // this can delay the page split and reduce space usage.
- bucket.FillPercent = 0.9
- }
- if err := bucket.Put(key, value); err != nil {
- plog.Fatalf("cannot put key into bucket (%v)", err)
- }
- t.pending++
-}
-
-// UnsafeRange must be called holding the lock on the tx.
-func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- plog.Fatalf("bucket %s does not exist", bucketName)
- }
- return unsafeRange(bucket.Cursor(), key, endKey, limit)
-}
-
-func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
- if limit <= 0 {
- limit = math.MaxInt64
- }
- var isMatch func(b []byte) bool
- if len(endKey) > 0 {
- isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
- } else {
- isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
- limit = 1
- }
- for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
- vs = append(vs, cv)
- keys = append(keys, ck)
- if limit == int64(len(keys)) {
- break
- }
- }
- return keys, vs
-}
-
-// UnsafeDelete must be called holding the lock on the tx.
-func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- plog.Fatalf("bucket %s does not exist", bucketName)
- }
- err := bucket.Delete(key)
- if err != nil {
- plog.Fatalf("cannot delete key from bucket (%v)", err)
- }
- t.pending++
-}
-
-// UnsafeForEach must be called holding the lock on the tx.
-func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- return unsafeForEach(t.tx, bucketName, visitor)
-}
-
-func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
- if b := tx.Bucket(bucket); b != nil {
- return b.ForEach(visitor)
- }
- return nil
-}
-
-// Commit commits a previous tx and begins a new writable one.
-func (t *batchTx) Commit() {
- t.Lock()
- t.commit(false)
- t.Unlock()
-}
-
-// CommitAndStop commits the previous tx and does not create a new one.
-func (t *batchTx) CommitAndStop() {
- t.Lock()
- t.commit(true)
- t.Unlock()
-}
-
-func (t *batchTx) Unlock() {
- if t.pending >= t.backend.batchLimit {
- t.commit(false)
- }
- t.Mutex.Unlock()
-}
-
-func (t *batchTx) commit(stop bool) {
- // commit the last tx
- if t.tx != nil {
- if t.pending == 0 && !stop {
- return
- }
-
- start := time.Now()
-
- // gofail: var beforeCommit struct{}
- err := t.tx.Commit()
- // gofail: var afterCommit struct{}
-
- commitDurations.Observe(time.Since(start).Seconds())
- atomic.AddInt64(&t.backend.commits, 1)
-
- t.pending = 0
- if err != nil {
- plog.Fatalf("cannot commit tx (%s)", err)
- }
- }
- if !stop {
- t.tx = t.backend.begin(true)
- }
-}
-
-type batchTxBuffered struct {
- batchTx
- buf txWriteBuffer
-}
-
-func newBatchTxBuffered(backend *backend) *batchTxBuffered {
- tx := &batchTxBuffered{
- batchTx: batchTx{backend: backend},
- buf: txWriteBuffer{
- txBuffer: txBuffer{make(map[string]*bucketBuffer)},
- seq: true,
- },
- }
- tx.Commit()
- return tx
-}
-
-func (t *batchTxBuffered) Unlock() {
- if t.pending != 0 {
- t.backend.readTx.mu.Lock()
- t.buf.writeback(&t.backend.readTx.buf)
- t.backend.readTx.mu.Unlock()
- if t.pending >= t.backend.batchLimit {
- t.commit(false)
- }
- }
- t.batchTx.Unlock()
-}
-
-func (t *batchTxBuffered) Commit() {
- t.Lock()
- t.commit(false)
- t.Unlock()
-}
-
-func (t *batchTxBuffered) CommitAndStop() {
- t.Lock()
- t.commit(true)
- t.Unlock()
-}
-
-func (t *batchTxBuffered) commit(stop bool) {
- // all read txs must be closed to acquire boltdb commit rwlock
- t.backend.readTx.mu.Lock()
- t.unsafeCommit(stop)
- t.backend.readTx.mu.Unlock()
-}
-
-func (t *batchTxBuffered) unsafeCommit(stop bool) {
- if t.backend.readTx.tx != nil {
- if err := t.backend.readTx.tx.Rollback(); err != nil {
- plog.Fatalf("cannot rollback tx (%s)", err)
- }
- t.backend.readTx.reset()
- }
-
- t.batchTx.commit(stop)
-
- if !stop {
- t.backend.readTx.tx = t.backend.begin(false)
- }
-}
-
-func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
- t.batchTx.UnsafePut(bucketName, key, value)
- t.buf.put(bucketName, key, value)
-}
-
-func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- t.batchTx.UnsafeSeqPut(bucketName, key, value)
- t.buf.putSeq(bucketName, key, value)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go
deleted file mode 100644
index edfed00..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!windows
-
-package backend
-
-import bolt "github.com/coreos/bbolt"
-
-var boltOpenOptions *bolt.Options = nil
-
-func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go
deleted file mode 100644
index b01785f..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "syscall"
-
- bolt "github.com/coreos/bbolt"
-)
-
-// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead
-// which can speed up entire-database read with boltdb. We want to
-// enable MAP_POPULATE for faster key-value store recovery in storage
-// package. If your kernel version is lower than 2.6.23
-// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might
-// silently ignore this flag. Please update your kernel to prevent this.
-var boltOpenOptions = &bolt.Options{
- MmapFlags: syscall.MAP_POPULATE,
- NoFreelistSync: true,
-}
-
-func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go
deleted file mode 100644
index 71d0270..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package backend
-
-import bolt "github.com/coreos/bbolt"
-
-var boltOpenOptions *bolt.Options = nil
-
-// setting mmap size != 0 on windows will allocate the entire
-// mmap size for the file, instead of growing it. So, force 0.
-
-func (bcfg *BackendConfig) mmapSize() int { return 0 }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go b/vendor/github.com/coreos/etcd/mvcc/backend/doc.go
deleted file mode 100644
index 9cc42fa..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package backend defines a standard interface for etcd's backend MVCC storage.
-package backend
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go
deleted file mode 100644
index 3415708..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_commit_duration_seconds",
- Help: "The latency distributions of commit called by backend.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- defragDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_defrag_duration_seconds",
- Help: "The latency distribution of backend defragmentation.",
-
- // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
- // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
- // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
- Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
- })
-
- snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_snapshot_duration_seconds",
- Help: "The latency distribution of backend snapshots.",
-
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
- })
-)
-
-func init() {
- prometheus.MustRegister(commitDurations)
- prometheus.MustRegister(defragDurations)
- prometheus.MustRegister(snapshotDurations)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go
deleted file mode 100644
index 0536de7..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "math"
- "sync"
-
- bolt "github.com/coreos/bbolt"
-)
-
-// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
-// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
-// is known to never overwrite any key so range is safe.
-var safeRangeBucket = []byte("key")
-
-type ReadTx interface {
- Lock()
- Unlock()
-
- UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
- UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
-}
-
-type readTx struct {
- // mu protects accesses to the txReadBuffer
- mu sync.RWMutex
- buf txReadBuffer
-
- // txmu protects accesses to buckets and tx on Range requests.
- txmu sync.RWMutex
- tx *bolt.Tx
- buckets map[string]*bolt.Bucket
-}
-
-func (rt *readTx) Lock() { rt.mu.RLock() }
-func (rt *readTx) Unlock() { rt.mu.RUnlock() }
-
-func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if endKey == nil {
- // forbid duplicates for single keys
- limit = 1
- }
- if limit <= 0 {
- limit = math.MaxInt64
- }
- if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
- panic("do not use unsafeRange on non-keys bucket")
- }
- keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
- if int64(len(keys)) == limit {
- return keys, vals
- }
-
- // find/cache bucket
- bn := string(bucketName)
- rt.txmu.RLock()
- bucket, ok := rt.buckets[bn]
- rt.txmu.RUnlock()
- if !ok {
- rt.txmu.Lock()
- bucket = rt.tx.Bucket(bucketName)
- rt.buckets[bn] = bucket
- rt.txmu.Unlock()
- }
-
- // ignore missing bucket since may have been created in this batch
- if bucket == nil {
- return keys, vals
- }
- rt.txmu.Lock()
- c := bucket.Cursor()
- rt.txmu.Unlock()
-
- k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
- return append(k2, keys...), append(v2, vals...)
-}
-
-func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- dups := make(map[string]struct{})
- getDups := func(k, v []byte) error {
- dups[string(k)] = struct{}{}
- return nil
- }
- visitNoDup := func(k, v []byte) error {
- if _, ok := dups[string(k)]; ok {
- return nil
- }
- return visitor(k, v)
- }
- if err := rt.buf.ForEach(bucketName, getDups); err != nil {
- return err
- }
- rt.txmu.Lock()
- err := unsafeForEach(rt.tx, bucketName, visitNoDup)
- rt.txmu.Unlock()
- if err != nil {
- return err
- }
- return rt.buf.ForEach(bucketName, visitor)
-}
-
-func (rt *readTx) reset() {
- rt.buf.reset()
- rt.buckets = make(map[string]*bolt.Bucket)
- rt.tx = nil
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go
deleted file mode 100644
index 56e885d..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "sort"
-)
-
-// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
-type txBuffer struct {
- buckets map[string]*bucketBuffer
-}
-
-func (txb *txBuffer) reset() {
- for k, v := range txb.buckets {
- if v.used == 0 {
- // demote
- delete(txb.buckets, k)
- }
- v.used = 0
- }
-}
-
-// txWriteBuffer buffers writes of pending updates that have not yet committed.
-type txWriteBuffer struct {
- txBuffer
- seq bool
-}
-
-func (txw *txWriteBuffer) put(bucket, k, v []byte) {
- txw.seq = false
- txw.putSeq(bucket, k, v)
-}
-
-func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
- b, ok := txw.buckets[string(bucket)]
- if !ok {
- b = newBucketBuffer()
- txw.buckets[string(bucket)] = b
- }
- b.add(k, v)
-}
-
-func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
- for k, wb := range txw.buckets {
- rb, ok := txr.buckets[k]
- if !ok {
- delete(txw.buckets, k)
- txr.buckets[k] = wb
- continue
- }
- if !txw.seq && wb.used > 1 {
- // assume no duplicate keys
- sort.Sort(wb)
- }
- rb.merge(wb)
- }
- txw.reset()
-}
-
-// txReadBuffer accesses buffered updates.
-type txReadBuffer struct{ txBuffer }
-
-func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if b := txr.buckets[string(bucketName)]; b != nil {
- return b.Range(key, endKey, limit)
- }
- return nil, nil
-}
-
-func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- if b := txr.buckets[string(bucketName)]; b != nil {
- return b.ForEach(visitor)
- }
- return nil
-}
-
-type kv struct {
- key []byte
- val []byte
-}
-
-// bucketBuffer buffers key-value pairs that are pending commit.
-type bucketBuffer struct {
- buf []kv
- // used tracks number of elements in use so buf can be reused without reallocation.
- used int
-}
-
-func newBucketBuffer() *bucketBuffer {
- return &bucketBuffer{buf: make([]kv, 512), used: 0}
-}
-
-func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
- f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
- idx := sort.Search(bb.used, f)
- if idx < 0 {
- return nil, nil
- }
- if len(endKey) == 0 {
- if bytes.Equal(key, bb.buf[idx].key) {
- keys = append(keys, bb.buf[idx].key)
- vals = append(vals, bb.buf[idx].val)
- }
- return keys, vals
- }
- if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
- return nil, nil
- }
- for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
- if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
- break
- }
- keys = append(keys, bb.buf[i].key)
- vals = append(vals, bb.buf[i].val)
- }
- return keys, vals
-}
-
-func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
- for i := 0; i < bb.used; i++ {
- if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (bb *bucketBuffer) add(k, v []byte) {
- bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
- bb.used++
- if bb.used == len(bb.buf) {
- buf := make([]kv, (3*len(bb.buf))/2)
- copy(buf, bb.buf)
- bb.buf = buf
- }
-}
-
-// merge merges data from bb into bbsrc.
-func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
- for i := 0; i < bbsrc.used; i++ {
- bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
- }
- if bb.used == bbsrc.used {
- return
- }
- if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
- return
- }
-
- sort.Stable(bb)
-
- // remove duplicates, using only newest update
- widx := 0
- for ridx := 1; ridx < bb.used; ridx++ {
- if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
- widx++
- }
- bb.buf[widx] = bb.buf[ridx]
- }
- bb.used = widx + 1
-}
-
-func (bb *bucketBuffer) Len() int { return bb.used }
-func (bb *bucketBuffer) Less(i, j int) bool {
- return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
-}
-func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/github.com/coreos/etcd/mvcc/doc.go
deleted file mode 100644
index ad5be03..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package mvcc defines etcd's stable MVCC storage.
-package mvcc
diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go
deleted file mode 100644
index b27a9e5..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/index.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sort"
- "sync"
-
- "github.com/google/btree"
-)
-
-type index interface {
- Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
- Range(key, end []byte, atRev int64) ([][]byte, []revision)
- Revisions(key, end []byte, atRev int64) []revision
- Put(key []byte, rev revision)
- Tombstone(key []byte, rev revision) error
- RangeSince(key, end []byte, rev int64) []revision
- Compact(rev int64) map[revision]struct{}
- Keep(rev int64) map[revision]struct{}
- Equal(b index) bool
-
- Insert(ki *keyIndex)
- KeyIndex(ki *keyIndex) *keyIndex
-}
-
-type treeIndex struct {
- sync.RWMutex
- tree *btree.BTree
-}
-
-func newTreeIndex() index {
- return &treeIndex{
- tree: btree.New(32),
- }
-}
-
-func (ti *treeIndex) Put(key []byte, rev revision) {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- keyi.put(rev.main, rev.sub)
- ti.tree.ReplaceOrInsert(keyi)
- return
- }
- okeyi := item.(*keyIndex)
- okeyi.put(rev.main, rev.sub)
-}
-
-func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
- keyi := &keyIndex{key: key}
- ti.RLock()
- defer ti.RUnlock()
- if keyi = ti.keyIndex(keyi); keyi == nil {
- return revision{}, revision{}, 0, ErrRevisionNotFound
- }
- return keyi.get(atRev)
-}
-
-func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
- ti.RLock()
- defer ti.RUnlock()
- return ti.keyIndex(keyi)
-}
-
-func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
- if item := ti.tree.Get(keyi); item != nil {
- return item.(*keyIndex)
- }
- return nil
-}
-
-func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
- keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
-
- ti.RLock()
- defer ti.RUnlock()
-
- ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
- if len(endi.key) > 0 && !item.Less(endi) {
- return false
- }
- f(item.(*keyIndex))
- return true
- })
-}
-
-func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
- if end == nil {
- rev, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return nil
- }
- return []revision{rev}
- }
- ti.visit(key, end, func(ki *keyIndex) {
- if rev, _, _, err := ki.get(atRev); err == nil {
- revs = append(revs, rev)
- }
- })
- return revs
-}
-
-func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
- if end == nil {
- rev, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return nil, nil
- }
- return [][]byte{key}, []revision{rev}
- }
- ti.visit(key, end, func(ki *keyIndex) {
- if rev, _, _, err := ki.get(atRev); err == nil {
- revs = append(revs, rev)
- keys = append(keys, ki.key)
- }
- })
- return keys, revs
-}
-
-func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- return ErrRevisionNotFound
- }
-
- ki := item.(*keyIndex)
- return ki.tombstone(rev.main, rev.sub)
-}
-
-// RangeSince returns all revisions from key(including) to end(excluding)
-// at or after the given rev. The returned slice is sorted in the order
-// of revision.
-func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
- keyi := &keyIndex{key: key}
-
- ti.RLock()
- defer ti.RUnlock()
-
- if end == nil {
- item := ti.tree.Get(keyi)
- if item == nil {
- return nil
- }
- keyi = item.(*keyIndex)
- return keyi.since(rev)
- }
-
- endi := &keyIndex{key: end}
- var revs []revision
- ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
- if len(endi.key) > 0 && !item.Less(endi) {
- return false
- }
- curKeyi := item.(*keyIndex)
- revs = append(revs, curKeyi.since(rev)...)
- return true
- })
- sort.Sort(revisions(revs))
-
- return revs
-}
-
-func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
- available := make(map[revision]struct{})
- var emptyki []*keyIndex
- plog.Printf("store.index: compact %d", rev)
- // TODO: do not hold the lock for long time?
- // This is probably OK. Compacting 10M keys takes O(10ms).
- ti.Lock()
- defer ti.Unlock()
- ti.tree.Ascend(compactIndex(rev, available, &emptyki))
- for _, ki := range emptyki {
- item := ti.tree.Delete(ki)
- if item == nil {
- plog.Panic("store.index: unexpected delete failure during compaction")
- }
- }
- return available
-}
-
-// Keep finds all revisions to be kept for a Compaction at the given rev.
-func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
- available := make(map[revision]struct{})
- ti.RLock()
- defer ti.RUnlock()
- ti.tree.Ascend(func(i btree.Item) bool {
- keyi := i.(*keyIndex)
- keyi.keep(rev, available)
- return true
- })
- return available
-}
-
-func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool {
- return func(i btree.Item) bool {
- keyi := i.(*keyIndex)
- keyi.compact(rev, available)
- if keyi.isEmpty() {
- *emptyki = append(*emptyki, keyi)
- }
- return true
- }
-}
-
-func (ti *treeIndex) Equal(bi index) bool {
- b := bi.(*treeIndex)
-
- if ti.tree.Len() != b.tree.Len() {
- return false
- }
-
- equal := true
-
- ti.tree.Ascend(func(item btree.Item) bool {
- aki := item.(*keyIndex)
- bki := b.tree.Get(item).(*keyIndex)
- if !aki.equal(bki) {
- equal = false
- return false
- }
- return true
- })
-
- return equal
-}
-
-func (ti *treeIndex) Insert(ki *keyIndex) {
- ti.Lock()
- defer ti.Unlock()
- ti.tree.ReplaceOrInsert(ki)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go
deleted file mode 100644
index 805922b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/key_index.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/google/btree"
-)
-
-var (
- ErrRevisionNotFound = errors.New("mvcc: revision not found")
-)
-
-// keyIndex stores the revisions of a key in the backend.
-// Each keyIndex has at least one key generation.
-// Each generation might have several key versions.
-// Tombstone on a key appends an tombstone version at the end
-// of the current generation and creates a new empty generation.
-// Each version of a key has an index pointing to the backend.
-//
-// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
-// generate a keyIndex:
-// key: "foo"
-// rev: 5
-// generations:
-// {empty}
-// {4.0, 5.0(t)}
-// {1.0, 2.0, 3.0(t)}
-//
-// Compact a keyIndex removes the versions with smaller or equal to
-// rev except the largest one. If the generation becomes empty
-// during compaction, it will be removed. if all the generations get
-// removed, the keyIndex should be removed.
-//
-// For example:
-// compact(2) on the previous example
-// generations:
-// {empty}
-// {4.0, 5.0(t)}
-// {2.0, 3.0(t)}
-//
-// compact(4)
-// generations:
-// {empty}
-// {4.0, 5.0(t)}
-//
-// compact(5):
-// generations:
-// {empty} -> key SHOULD be removed.
-//
-// compact(6):
-// generations:
-// {empty} -> key SHOULD be removed.
-type keyIndex struct {
- key []byte
- modified revision // the main rev of the last modification
- generations []generation
-}
-
-// put puts a revision to the keyIndex.
-func (ki *keyIndex) put(main int64, sub int64) {
- rev := revision{main: main, sub: sub}
-
- if !rev.GreaterThan(ki.modified) {
- plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified)
- }
- if len(ki.generations) == 0 {
- ki.generations = append(ki.generations, generation{})
- }
- g := &ki.generations[len(ki.generations)-1]
- if len(g.revs) == 0 { // create a new key
- keysGauge.Inc()
- g.created = rev
- }
- g.revs = append(g.revs, rev)
- g.ver++
- ki.modified = rev
-}
-
-func (ki *keyIndex) restore(created, modified revision, ver int64) {
- if len(ki.generations) != 0 {
- plog.Panicf("store.keyindex: cannot restore non-empty keyIndex")
- }
-
- ki.modified = modified
- g := generation{created: created, ver: ver, revs: []revision{modified}}
- ki.generations = append(ki.generations, g)
- keysGauge.Inc()
-}
-
-// tombstone puts a revision, pointing to a tombstone, to the keyIndex.
-// It also creates a new empty generation in the keyIndex.
-// It returns ErrRevisionNotFound when tombstone on an empty generation.
-func (ki *keyIndex) tombstone(main int64, sub int64) error {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key))
- }
- if ki.generations[len(ki.generations)-1].isEmpty() {
- return ErrRevisionNotFound
- }
- ki.put(main, sub)
- ki.generations = append(ki.generations, generation{})
- keysGauge.Dec()
- return nil
-}
-
-// get gets the modified, created revision and version of the key that satisfies the given atRev.
-// Rev must be higher than or equal to the given atRev.
-func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
- }
- g := ki.findGeneration(atRev)
- if g.isEmpty() {
- return revision{}, revision{}, 0, ErrRevisionNotFound
- }
-
- n := g.walk(func(rev revision) bool { return rev.main > atRev })
- if n != -1 {
- return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil
- }
-
- return revision{}, revision{}, 0, ErrRevisionNotFound
-}
-
-// since returns revisions since the given rev. Only the revision with the
-// largest sub revision will be returned if multiple revisions have the same
-// main revision.
-func (ki *keyIndex) since(rev int64) []revision {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
- }
- since := revision{rev, 0}
- var gi int
- // find the generations to start checking
- for gi = len(ki.generations) - 1; gi > 0; gi-- {
- g := ki.generations[gi]
- if g.isEmpty() {
- continue
- }
- if since.GreaterThan(g.created) {
- break
- }
- }
-
- var revs []revision
- var last int64
- for ; gi < len(ki.generations); gi++ {
- for _, r := range ki.generations[gi].revs {
- if since.GreaterThan(r) {
- continue
- }
- if r.main == last {
- // replace the revision with a new one that has higher sub value,
- // because the original one should not be seen by external
- revs[len(revs)-1] = r
- continue
- }
- revs = append(revs, r)
- last = r.main
- }
- }
- return revs
-}
-
-// compact compacts a keyIndex by removing the versions with smaller or equal
-// revision than the given atRev except the largest one (If the largest one is
-// a tombstone, it will not be kept).
-// If a generation becomes empty during compaction, it will be removed.
-func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key))
- }
-
- genIdx, revIndex := ki.doCompact(atRev, available)
-
- g := &ki.generations[genIdx]
- if !g.isEmpty() {
- // remove the previous contents.
- if revIndex != -1 {
- g.revs = g.revs[revIndex:]
- }
- // remove any tombstone
- if len(g.revs) == 1 && genIdx != len(ki.generations)-1 {
- delete(available, g.revs[0])
- genIdx++
- }
- }
-
- // remove the previous generations.
- ki.generations = ki.generations[genIdx:]
-}
-
-// keep finds the revision to be kept if compact is called at given atRev.
-func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
- if ki.isEmpty() {
- return
- }
-
- genIdx, revIndex := ki.doCompact(atRev, available)
- g := &ki.generations[genIdx]
- if !g.isEmpty() {
- // remove any tombstone
- if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
- delete(available, g.revs[revIndex])
- }
- }
-}
-
-func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
- // walk until reaching the first revision smaller or equal to "atRev",
- // and add the revision to the available map
- f := func(rev revision) bool {
- if rev.main <= atRev {
- available[rev] = struct{}{}
- return false
- }
- return true
- }
-
- genIdx, g := 0, &ki.generations[0]
- // find first generation includes atRev or created after atRev
- for genIdx < len(ki.generations)-1 {
- if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
- break
- }
- genIdx++
- g = &ki.generations[genIdx]
- }
-
- revIndex = g.walk(f)
-
- return genIdx, revIndex
-}
-
-func (ki *keyIndex) isEmpty() bool {
- return len(ki.generations) == 1 && ki.generations[0].isEmpty()
-}
-
-// findGeneration finds out the generation of the keyIndex that the
-// given rev belongs to. If the given rev is at the gap of two generations,
-// which means that the key does not exist at the given rev, it returns nil.
-func (ki *keyIndex) findGeneration(rev int64) *generation {
- lastg := len(ki.generations) - 1
- cg := lastg
-
- for cg >= 0 {
- if len(ki.generations[cg].revs) == 0 {
- cg--
- continue
- }
- g := ki.generations[cg]
- if cg != lastg {
- if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
- return nil
- }
- }
- if g.revs[0].main <= rev {
- return &ki.generations[cg]
- }
- cg--
- }
- return nil
-}
-
-func (a *keyIndex) Less(b btree.Item) bool {
- return bytes.Compare(a.key, b.(*keyIndex).key) == -1
-}
-
-func (a *keyIndex) equal(b *keyIndex) bool {
- if !bytes.Equal(a.key, b.key) {
- return false
- }
- if a.modified != b.modified {
- return false
- }
- if len(a.generations) != len(b.generations) {
- return false
- }
- for i := range a.generations {
- ag, bg := a.generations[i], b.generations[i]
- if !ag.equal(bg) {
- return false
- }
- }
- return true
-}
-
-func (ki *keyIndex) String() string {
- var s string
- for _, g := range ki.generations {
- s += g.String()
- }
- return s
-}
-
-// generation contains multiple revisions of a key.
-type generation struct {
- ver int64
- created revision // when the generation is created (put in first revision).
- revs []revision
-}
-
-func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
-
-// walk walks through the revisions in the generation in descending order.
-// It passes the revision to the given function.
-// walk returns until: 1. it finishes walking all pairs 2. the function returns false.
-// walk returns the position at where it stopped. If it stopped after
-// finishing walking, -1 will be returned.
-func (g *generation) walk(f func(rev revision) bool) int {
- l := len(g.revs)
- for i := range g.revs {
- ok := f(g.revs[l-i-1])
- if !ok {
- return l - i - 1
- }
- }
- return -1
-}
-
-func (g *generation) String() string {
- return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs)
-}
-
-func (a generation) equal(b generation) bool {
- if a.ver != b.ver {
- return false
- }
- if len(a.revs) != len(b.revs) {
- return false
- }
-
- for i := range a.revs {
- ar, br := a.revs[i], b.revs[i]
- if ar != br {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go
deleted file mode 100644
index 2dad3ad..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kv.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-type RangeOptions struct {
- Limit int64
- Rev int64
- Count bool
-}
-
-type RangeResult struct {
- KVs []mvccpb.KeyValue
- Rev int64
- Count int
-}
-
-type ReadView interface {
- // FirstRev returns the first KV revision at the time of opening the txn.
- // After a compaction, the first revision increases to the compaction
- // revision.
- FirstRev() int64
-
- // Rev returns the revision of the KV at the time of opening the txn.
- Rev() int64
-
- // Range gets the keys in the range at rangeRev.
- // The returned rev is the current revision of the KV when the operation is executed.
- // If rangeRev <=0, range gets the keys at currentRev.
- // If `end` is nil, the request returns the key.
- // If `end` is not nil and not empty, it gets the keys in range [key, range_end).
- // If `end` is not nil and empty, it gets the keys greater than or equal to key.
- // Limit limits the number of keys returned.
- // If the required rev is compacted, ErrCompacted will be returned.
- Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error)
-}
-
-// TxnRead represents a read-only transaction with operations that will not
-// block other read transactions.
-type TxnRead interface {
- ReadView
- // End marks the transaction is complete and ready to commit.
- End()
-}
-
-type WriteView interface {
- // DeleteRange deletes the given range from the store.
- // A deleteRange increases the rev of the store if any key in the range exists.
- // The number of key deleted will be returned.
- // The returned rev is the current revision of the KV when the operation is executed.
- // It also generates one event for each key delete in the event history.
- // if the `end` is nil, deleteRange deletes the key.
- // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end).
- DeleteRange(key, end []byte) (n, rev int64)
-
- // Put puts the given key, value into the store. Put also takes additional argument lease to
- // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease
- // id.
- // A put also increases the rev of the store, and generates one event in the event history.
- // The returned rev is the current revision of the KV when the operation is executed.
- Put(key, value []byte, lease lease.LeaseID) (rev int64)
-}
-
-// TxnWrite represents a transaction that can modify the store.
-type TxnWrite interface {
- TxnRead
- WriteView
- // Changes gets the changes made since opening the write txn.
- Changes() []mvccpb.KeyValue
-}
-
-// txnReadWrite coerces a read txn to a write, panicking on any write operation.
-type txnReadWrite struct{ TxnRead }
-
-func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
-func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
- panic("unexpected Put")
-}
-func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
-
-func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
-
-type KV interface {
- ReadView
- WriteView
-
- // Read creates a read transaction.
- Read() TxnRead
-
- // Write creates a write transaction.
- Write() TxnWrite
-
- // Hash computes the hash of the KV's backend.
- Hash() (hash uint32, revision int64, err error)
-
- // HashByRev computes the hash of all MVCC revisions up to a given revision.
- HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
-
- // Compact frees all superseded keys with revisions less than rev.
- Compact(rev int64) (<-chan struct{}, error)
-
- // Commit commits outstanding txns into the underlying backend.
- Commit()
-
- // Restore restores the KV store from a backend.
- Restore(b backend.Backend) error
- Close() error
-}
-
-// WatchableKV is a KV that can be watched.
-type WatchableKV interface {
- KV
- Watchable
-}
-
-// Watchable is the interface that wraps the NewWatchStream function.
-type Watchable interface {
- // NewWatchStream returns a WatchStream that can be used to
- // watch events happened or happening on the KV.
- NewWatchStream() WatchStream
-}
-
-// ConsistentWatchableKV is a WatchableKV that understands the consistency
-// algorithm and consistent index.
-// If the consistent index of executing entry is not larger than the
-// consistent index of ConsistentWatchableKV, all operations in
-// this entry are skipped and return empty response.
-type ConsistentWatchableKV interface {
- WatchableKV
- // ConsistentIndex returns the current consistent index of the KV.
- ConsistentIndex() uint64
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go
deleted file mode 100644
index f40ba8e..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kv_view.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
-)
-
-type readView struct{ kv KV }
-
-func (rv *readView) FirstRev() int64 {
- tr := rv.kv.Read()
- defer tr.End()
- return tr.FirstRev()
-}
-
-func (rv *readView) Rev() int64 {
- tr := rv.kv.Read()
- defer tr.End()
- return tr.Rev()
-}
-
-func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- tr := rv.kv.Read()
- defer tr.End()
- return tr.Range(key, end, ro)
-}
-
-type writeView struct{ kv KV }
-
-func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
- tw := wv.kv.Write()
- defer tw.End()
- return tw.DeleteRange(key, end)
-}
-
-func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
- tw := wv.kv.Write()
- defer tw.End()
- return tw.Put(key, value, lease)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go
deleted file mode 100644
index dd9f04a..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go
+++ /dev/null
@@ -1,510 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "hash/crc32"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/schedule"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- keyBucketName = []byte("key")
- metaBucketName = []byte("meta")
-
- consistentIndexKeyName = []byte("consistent_index")
- scheduledCompactKeyName = []byte("scheduledCompactRev")
- finishedCompactKeyName = []byte("finishedCompactRev")
-
- ErrCompacted = errors.New("mvcc: required revision has been compacted")
- ErrFutureRev = errors.New("mvcc: required revision is a future revision")
- ErrCanceled = errors.New("mvcc: watcher is canceled")
- ErrClosed = errors.New("mvcc: closed")
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc")
-)
-
-const (
- // markedRevBytesLen is the byte length of marked revision.
- // The first `revBytesLen` bytes represents a normal revision. The last
- // one byte is the mark.
- markedRevBytesLen = revBytesLen + 1
- markBytePosition = markedRevBytesLen - 1
- markTombstone byte = 't'
-)
-
-var restoreChunkKeys = 10000 // non-const for testing
-
-// ConsistentIndexGetter is an interface that wraps the Get method.
-// Consistent index is the offset of an entry in a consistent replicated log.
-type ConsistentIndexGetter interface {
- // ConsistentIndex returns the consistent index of current executing entry.
- ConsistentIndex() uint64
-}
-
-type store struct {
- ReadView
- WriteView
-
- // consistentIndex caches the "consistent_index" key's value. Accessed
- // through atomics so must be 64-bit aligned.
- consistentIndex uint64
-
- // mu read locks for txns and write locks for non-txn store changes.
- mu sync.RWMutex
-
- ig ConsistentIndexGetter
-
- b backend.Backend
- kvindex index
-
- le lease.Lessor
-
- // revMuLock protects currentRev and compactMainRev.
- // Locked at end of write txn and released after write txn unlock lock.
- // Locked before locking read txn and released after locking.
- revMu sync.RWMutex
- // currentRev is the revision of the last completed transaction.
- currentRev int64
- // compactMainRev is the main revision of the last compaction.
- compactMainRev int64
-
- // bytesBuf8 is a byte slice of length 8
- // to avoid a repetitive allocation in saveIndex.
- bytesBuf8 []byte
-
- fifoSched schedule.Scheduler
-
- stopc chan struct{}
-}
-
-// NewStore returns a new store. It is useful to create a store inside
-// mvcc pkg. It should only be used for testing externally.
-func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
- s := &store{
- b: b,
- ig: ig,
- kvindex: newTreeIndex(),
-
- le: le,
-
- currentRev: 1,
- compactMainRev: -1,
-
- bytesBuf8: make([]byte, 8),
- fifoSched: schedule.NewFIFOScheduler(),
-
- stopc: make(chan struct{}),
- }
- s.ReadView = &readView{s}
- s.WriteView = &writeView{s}
- if s.le != nil {
- s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
- }
-
- tx := s.b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket(keyBucketName)
- tx.UnsafeCreateBucket(metaBucketName)
- tx.Unlock()
- s.b.ForceCommit()
-
- if err := s.restore(); err != nil {
- // TODO: return the error instead of panic here?
- panic("failed to recover store from backend")
- }
-
- return s
-}
-
-func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
- if ctx == nil || ctx.Err() != nil {
- s.mu.Lock()
- select {
- case <-s.stopc:
- default:
- f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
- s.fifoSched.Schedule(f)
- }
- s.mu.Unlock()
- return
- }
- close(ch)
-}
-
-func (s *store) Hash() (hash uint32, revision int64, err error) {
- start := time.Now()
-
- s.b.ForceCommit()
- h, err := s.b.Hash(DefaultIgnores)
-
- hashDurations.Observe(time.Since(start).Seconds())
- return h, s.currentRev, err
-}
-
-func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
- start := time.Now()
-
- s.mu.RLock()
- s.revMu.RLock()
- compactRev, currentRev = s.compactMainRev, s.currentRev
- s.revMu.RUnlock()
-
- if rev > 0 && rev <= compactRev {
- s.mu.RUnlock()
- return 0, 0, compactRev, ErrCompacted
- } else if rev > 0 && rev > currentRev {
- s.mu.RUnlock()
- return 0, currentRev, 0, ErrFutureRev
- }
-
- if rev == 0 {
- rev = currentRev
- }
- keep := s.kvindex.Keep(rev)
-
- tx := s.b.ReadTx()
- tx.Lock()
- defer tx.Unlock()
- s.mu.RUnlock()
-
- upper := revision{main: rev + 1}
- lower := revision{main: compactRev + 1}
- h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
- h.Write(keyBucketName)
- err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
- kr := bytesToRev(k)
- if !upper.GreaterThan(kr) {
- return nil
- }
- // skip revisions that are scheduled for deletion
- // due to compacting; don't skip if there isn't one.
- if lower.GreaterThan(kr) && len(keep) > 0 {
- if _, ok := keep[kr]; !ok {
- return nil
- }
- }
- h.Write(k)
- h.Write(v)
- return nil
- })
- hash = h.Sum32()
-
- hashRevDurations.Observe(time.Since(start).Seconds())
- return hash, currentRev, compactRev, err
-}
-
-func (s *store) Compact(rev int64) (<-chan struct{}, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.revMu.Lock()
- defer s.revMu.Unlock()
-
- if rev <= s.compactMainRev {
- ch := make(chan struct{})
- f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
- s.fifoSched.Schedule(f)
- return ch, ErrCompacted
- }
- if rev > s.currentRev {
- return nil, ErrFutureRev
- }
-
- start := time.Now()
-
- s.compactMainRev = rev
-
- rbytes := newRevBytes()
- revToBytes(revision{main: rev}, rbytes)
-
- tx := s.b.BatchTx()
- tx.Lock()
- tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
- tx.Unlock()
- // ensure that desired compaction is persisted
- s.b.ForceCommit()
-
- keep := s.kvindex.Compact(rev)
- ch := make(chan struct{})
- var j = func(ctx context.Context) {
- if ctx.Err() != nil {
- s.compactBarrier(ctx, ch)
- return
- }
- if !s.scheduleCompaction(rev, keep) {
- s.compactBarrier(nil, ch)
- return
- }
- close(ch)
- }
-
- s.fifoSched.Schedule(j)
-
- indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
- return ch, nil
-}
-
-// DefaultIgnores is a map of keys to ignore in hash checking.
-var DefaultIgnores map[backend.IgnoreKey]struct{}
-
-func init() {
- DefaultIgnores = map[backend.IgnoreKey]struct{}{
- // consistent index might be changed due to v2 internal sync, which
- // is not controllable by the user.
- {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
- }
-}
-
-func (s *store) Commit() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- tx := s.b.BatchTx()
- tx.Lock()
- s.saveIndex(tx)
- tx.Unlock()
- s.b.ForceCommit()
-}
-
-func (s *store) Restore(b backend.Backend) error {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- close(s.stopc)
- s.fifoSched.Stop()
-
- atomic.StoreUint64(&s.consistentIndex, 0)
- s.b = b
- s.kvindex = newTreeIndex()
- s.currentRev = 1
- s.compactMainRev = -1
- s.fifoSched = schedule.NewFIFOScheduler()
- s.stopc = make(chan struct{})
-
- return s.restore()
-}
-
-func (s *store) restore() error {
- b := s.b
-
- reportDbTotalSizeInBytesMu.Lock()
- reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
- reportDbTotalSizeInBytesMu.Unlock()
- reportDbTotalSizeInUseInBytesMu.Lock()
- reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
- reportDbTotalSizeInUseInBytesMu.Unlock()
-
- min, max := newRevBytes(), newRevBytes()
- revToBytes(revision{main: 1}, min)
- revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)
-
- keyToLease := make(map[string]lease.LeaseID)
-
- // restore index
- tx := s.b.BatchTx()
- tx.Lock()
-
- _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
- if len(finishedCompactBytes) != 0 {
- s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
- plog.Printf("restore compact to %d", s.compactMainRev)
- }
- _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
- scheduledCompact := int64(0)
- if len(scheduledCompactBytes) != 0 {
- scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
- }
-
- // index keys concurrently as they're loaded in from tx
- keysGauge.Set(0)
- rkvc, revc := restoreIntoIndex(s.kvindex)
- for {
- keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
- if len(keys) == 0 {
- break
- }
- // rkvc blocks if the total pending keys exceeds the restore
- // chunk size to keep keys from consuming too much memory.
- restoreChunk(rkvc, keys, vals, keyToLease)
- if len(keys) < restoreChunkKeys {
- // partial set implies final set
- break
- }
- // next set begins after where this one ended
- newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
- newMin.sub++
- revToBytes(newMin, min)
- }
- close(rkvc)
- s.currentRev = <-revc
-
- // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
- // the correct revision should be set to compaction revision in the case, not the largest revision
- // we have seen.
- if s.currentRev < s.compactMainRev {
- s.currentRev = s.compactMainRev
- }
- if scheduledCompact <= s.compactMainRev {
- scheduledCompact = 0
- }
-
- for key, lid := range keyToLease {
- if s.le == nil {
- panic("no lessor to attach lease")
- }
- err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
- if err != nil {
- plog.Errorf("unexpected Attach error: %v", err)
- }
- }
-
- tx.Unlock()
-
- if scheduledCompact != 0 {
- s.Compact(scheduledCompact)
- plog.Printf("resume scheduled compaction at %d", scheduledCompact)
- }
-
- return nil
-}
-
-type revKeyValue struct {
- key []byte
- kv mvccpb.KeyValue
- kstr string
-}
-
-func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) {
- rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
- go func() {
- currentRev := int64(1)
- defer func() { revc <- currentRev }()
- // restore the tree index from streaming the unordered index.
- kiCache := make(map[string]*keyIndex, restoreChunkKeys)
- for rkv := range rkvc {
- ki, ok := kiCache[rkv.kstr]
- // purge kiCache if many keys but still missing in the cache
- if !ok && len(kiCache) >= restoreChunkKeys {
- i := 10
- for k := range kiCache {
- delete(kiCache, k)
- if i--; i == 0 {
- break
- }
- }
- }
- // cache miss, fetch from tree index if there
- if !ok {
- ki = &keyIndex{key: rkv.kv.Key}
- if idxKey := idx.KeyIndex(ki); idxKey != nil {
- kiCache[rkv.kstr], ki = idxKey, idxKey
- ok = true
- }
- }
- rev := bytesToRev(rkv.key)
- currentRev = rev.main
- if ok {
- if isTombstone(rkv.key) {
- ki.tombstone(rev.main, rev.sub)
- continue
- }
- ki.put(rev.main, rev.sub)
- } else if !isTombstone(rkv.key) {
- ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
- idx.Insert(ki)
- kiCache[rkv.kstr] = ki
- }
- }
- }()
- return rkvc, revc
-}
-
-func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
- for i, key := range keys {
- rkv := revKeyValue{key: key}
- if err := rkv.kv.Unmarshal(vals[i]); err != nil {
- plog.Fatalf("cannot unmarshal event: %v", err)
- }
- rkv.kstr = string(rkv.kv.Key)
- if isTombstone(key) {
- delete(keyToLease, rkv.kstr)
- } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
- keyToLease[rkv.kstr] = lid
- } else {
- delete(keyToLease, rkv.kstr)
- }
- kvc <- rkv
- }
-}
-
-func (s *store) Close() error {
- close(s.stopc)
- s.fifoSched.Stop()
- return nil
-}
-
-func (s *store) saveIndex(tx backend.BatchTx) {
- if s.ig == nil {
- return
- }
- bs := s.bytesBuf8
- ci := s.ig.ConsistentIndex()
- binary.BigEndian.PutUint64(bs, ci)
- // put the index into the underlying backend
- // tx has been locked in TxnBegin, so there is no need to lock it again
- tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
- atomic.StoreUint64(&s.consistentIndex, ci)
-}
-
-func (s *store) ConsistentIndex() uint64 {
- if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
- return ci
- }
- tx := s.b.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
- if len(vs) == 0 {
- return 0
- }
- v := binary.BigEndian.Uint64(vs[0])
- atomic.StoreUint64(&s.consistentIndex, v)
- return v
-}
-
-// appendMarkTombstone appends tombstone mark to normal revision bytes.
-func appendMarkTombstone(b []byte) []byte {
- if len(b) != revBytesLen {
- plog.Panicf("cannot append mark to non normal revision bytes")
- }
- return append(b, markTombstone)
-}
-
-// isTombstone checks whether the revision bytes is a tombstone.
-func isTombstone(b []byte) bool {
- return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
-}
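
The ConsistentIndex method in the removed kvstore.go caches the backend value behind an atomic so later calls avoid a backend read. A rough, dependency-free sketch of that caching shape follows; readFromBackend is a stub standing in for the bolt lookup of the consistent_index key.

```
package main

import (
	"fmt"
	"sync/atomic"
)

type store struct {
	consistentIndex uint64 // read and written atomically, as in kvstore.go
}

// readFromBackend stands in for the UnsafeRange lookup of the
// "consistent_index" key in the meta bucket.
func (s *store) readFromBackend() uint64 { return 12345 }

func (s *store) ConsistentIndex() uint64 {
	if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
		return ci // served from the cached value
	}
	v := s.readFromBackend()
	atomic.StoreUint64(&s.consistentIndex, v)
	return v
}

func main() {
	s := &store{}
	fmt.Println(s.ConsistentIndex()) // 12345, read from the stub backend
	fmt.Println(s.ConsistentIndex()) // 12345, now served from the atomic
}
```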
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
deleted file mode 100644
index 1726490..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "encoding/binary"
- "time"
-)
-
-func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
- totalStart := time.Now()
- defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond))
- keyCompactions := 0
- defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
-
- end := make([]byte, 8)
- binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
-
- batchsize := int64(10000)
- last := make([]byte, 8+1+8)
- for {
- var rev revision
-
- start := time.Now()
- tx := s.b.BatchTx()
- tx.Lock()
-
- keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize)
- for _, key := range keys {
- rev = bytesToRev(key)
- if _, ok := keep[rev]; !ok {
- tx.UnsafeDelete(keyBucketName, key)
- keyCompactions++
- }
- }
-
- if len(keys) < int(batchsize) {
- rbytes := make([]byte, 8+1+8)
- revToBytes(revision{main: compactMainRev}, rbytes)
- tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes)
- tx.Unlock()
- plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
- return true
- }
-
- // update last
- revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last)
- tx.Unlock()
- dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
-
- select {
- case <-time.After(100 * time.Millisecond):
- case <-s.stopc:
- return false
- }
- }
-}
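
scheduleCompaction above walks the key bucket in batches and deletes every revision key that is not in the keep set returned by the index. The snippet below sketches that filter in memory, with plain strings standing in for encoded revision keys.

```
package main

import "fmt"

// compactKeys keeps only the revisions present in keep, mirroring the
// delete loop in scheduleCompaction without a backend.
func compactKeys(keys []string, keep map[string]struct{}) (kept []string, deleted int) {
	for _, k := range keys {
		if _, ok := keep[k]; ok {
			kept = append(kept, k)
			continue
		}
		deleted++
	}
	return kept, deleted
}

func main() {
	keys := []string{"rev-1", "rev-2", "rev-3"}
	keep := map[string]struct{}{"rev-3": {}}
	kept, deleted := compactKeys(keys, keep)
	fmt.Println(kept, deleted) // [rev-3] 2
}
```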
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
deleted file mode 100644
index 8896fb8..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-type storeTxnRead struct {
- s *store
- tx backend.ReadTx
-
- firstRev int64
- rev int64
-}
-
-func (s *store) Read() TxnRead {
- s.mu.RLock()
- tx := s.b.ReadTx()
- s.revMu.RLock()
- tx.Lock()
- firstRev, rev := s.compactMainRev, s.currentRev
- s.revMu.RUnlock()
- return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev})
-}
-
-func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
-func (tr *storeTxnRead) Rev() int64 { return tr.rev }
-
-func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- return tr.rangeKeys(key, end, tr.Rev(), ro)
-}
-
-func (tr *storeTxnRead) End() {
- tr.tx.Unlock()
- tr.s.mu.RUnlock()
-}
-
-type storeTxnWrite struct {
- storeTxnRead
- tx backend.BatchTx
- // beginRev is the revision where the txn begins; it will write to the next revision.
- beginRev int64
- changes []mvccpb.KeyValue
-}
-
-func (s *store) Write() TxnWrite {
- s.mu.RLock()
- tx := s.b.BatchTx()
- tx.Lock()
- tw := &storeTxnWrite{
- storeTxnRead: storeTxnRead{s, tx, 0, 0},
- tx: tx,
- beginRev: s.currentRev,
- changes: make([]mvccpb.KeyValue, 0, 4),
- }
- return newMetricsTxnWrite(tw)
-}
-
-func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
-
-func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- rev := tw.beginRev
- if len(tw.changes) > 0 {
- rev++
- }
- return tw.rangeKeys(key, end, rev, ro)
-}
-
-func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
- if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
- return n, int64(tw.beginRev + 1)
- }
- return 0, int64(tw.beginRev)
-}
-
-func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
- tw.put(key, value, lease)
- return int64(tw.beginRev + 1)
-}
-
-func (tw *storeTxnWrite) End() {
- // only update index if the txn modifies the mvcc state.
- if len(tw.changes) != 0 {
- tw.s.saveIndex(tw.tx)
- // hold revMu lock to prevent new read txns from opening until writeback.
- tw.s.revMu.Lock()
- tw.s.currentRev++
- }
- tw.tx.Unlock()
- if len(tw.changes) != 0 {
- tw.s.revMu.Unlock()
- }
- tw.s.mu.RUnlock()
-}
-
-func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
- rev := ro.Rev
- if rev > curRev {
- return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
- }
- if rev <= 0 {
- rev = curRev
- }
- if rev < tr.s.compactMainRev {
- return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
- }
-
- revpairs := tr.s.kvindex.Revisions(key, end, int64(rev))
- if len(revpairs) == 0 {
- return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
- }
- if ro.Count {
- return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil
- }
-
- limit := int(ro.Limit)
- if limit <= 0 || limit > len(revpairs) {
- limit = len(revpairs)
- }
-
- kvs := make([]mvccpb.KeyValue, limit)
- revBytes := newRevBytes()
- for i, revpair := range revpairs[:len(kvs)] {
- revToBytes(revpair, revBytes)
- _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0)
- if len(vs) != 1 {
- plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub)
- }
- if err := kvs[i].Unmarshal(vs[0]); err != nil {
- plog.Fatalf("cannot unmarshal event: %v", err)
- }
- }
- return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
-}
-
-func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
- rev := tw.beginRev + 1
- c := rev
- oldLease := lease.NoLease
-
- // if the key exists before, use its previous created and
- // get its previous leaseID
- _, created, ver, err := tw.s.kvindex.Get(key, rev)
- if err == nil {
- c = created.main
- oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
- }
-
- ibytes := newRevBytes()
- idxRev := revision{main: rev, sub: int64(len(tw.changes))}
- revToBytes(idxRev, ibytes)
-
- ver = ver + 1
- kv := mvccpb.KeyValue{
- Key: key,
- Value: value,
- CreateRevision: c,
- ModRevision: rev,
- Version: ver,
- Lease: int64(leaseID),
- }
-
- d, err := kv.Marshal()
- if err != nil {
- plog.Fatalf("cannot marshal event: %v", err)
- }
-
- tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
- tw.s.kvindex.Put(key, idxRev)
- tw.changes = append(tw.changes, kv)
-
- if oldLease != lease.NoLease {
- if tw.s.le == nil {
- panic("no lessor to detach lease")
- }
- err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
- if err != nil {
- plog.Errorf("unexpected error from lease detach: %v", err)
- }
- }
- if leaseID != lease.NoLease {
- if tw.s.le == nil {
- panic("no lessor to attach lease")
- }
- err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
- if err != nil {
- panic("unexpected error from lease Attach")
- }
- }
-}
-
-func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
- rrev := tw.beginRev
- if len(tw.changes) > 0 {
- rrev += 1
- }
- keys, revs := tw.s.kvindex.Range(key, end, rrev)
- if len(keys) == 0 {
- return 0
- }
- for i, key := range keys {
- tw.delete(key, revs[i])
- }
- return int64(len(keys))
-}
-
-func (tw *storeTxnWrite) delete(key []byte, rev revision) {
- ibytes := newRevBytes()
- idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))}
- revToBytes(idxRev, ibytes)
- ibytes = appendMarkTombstone(ibytes)
-
- kv := mvccpb.KeyValue{Key: key}
-
- d, err := kv.Marshal()
- if err != nil {
- plog.Fatalf("cannot marshal event: %v", err)
- }
-
- tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
- err = tw.s.kvindex.Tombstone(key, idxRev)
- if err != nil {
- plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err)
- }
- tw.changes = append(tw.changes, kv)
-
- item := lease.LeaseItem{Key: string(key)}
- leaseID := tw.s.le.GetLease(item)
-
- if leaseID != lease.NoLease {
- err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
- if err != nil {
- plog.Errorf("cannot detach %v", err)
- }
- }
-}
-
-func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go
deleted file mode 100644
index b753310..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/metrics.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sync"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- rangeCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "range_total",
- Help: "Total number of ranges seen by this member.",
- })
-
- putCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "put_total",
- Help: "Total number of puts seen by this member.",
- })
-
- deleteCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "delete_total",
- Help: "Total number of deletes seen by this member.",
- })
-
- txnCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "txn_total",
- Help: "Total number of txns seen by this member.",
- })
-
- keysGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "keys_total",
- Help: "Total number of keys.",
- })
-
- watchStreamGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "watch_stream_total",
- Help: "Total number of watch streams.",
- })
-
- watcherGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "watcher_total",
- Help: "Total number of watchers.",
- })
-
- slowWatcherGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "slow_watcher_total",
- Help: "Total number of unsynced slow watchers.",
- })
-
- totalEventsCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "events_total",
- Help: "Total number of events sent by this member.",
- })
-
- pendingEventsGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "pending_events_total",
- Help: "Total number of pending events to be sent.",
- })
-
- indexCompactionPauseDurations = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "index_compaction_pause_duration_milliseconds",
- Help: "Bucketed histogram of index compaction pause duration.",
- // 0.5ms -> 1second
- Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
- })
-
- dbCompactionPauseDurations = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_pause_duration_milliseconds",
- Help: "Bucketed histogram of db compaction pause duration.",
- // 1ms -> 4second
- Buckets: prometheus.ExponentialBuckets(1, 2, 13),
- })
-
- dbCompactionTotalDurations = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_total_duration_milliseconds",
- Help: "Bucketed histogram of db compaction total duration.",
- // 100ms -> 800second
- Buckets: prometheus.ExponentialBuckets(100, 2, 14),
- })
-
- dbCompactionKeysCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_keys_total",
- Help: "Total number of db keys compacted.",
- })
-
- dbTotalSizeDebugging = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_total_size_in_bytes",
- Help: "Total size of the underlying database physically allocated in bytes. Use etcd_mvcc_db_total_size_in_bytes",
- },
- func() float64 {
- reportDbTotalSizeInBytesMu.RLock()
- defer reportDbTotalSizeInBytesMu.RUnlock()
- return reportDbTotalSizeInBytes()
- },
- )
- dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_total_size_in_bytes",
- Help: "Total size of the underlying database physically allocated in bytes.",
- },
- func() float64 {
- reportDbTotalSizeInBytesMu.RLock()
- defer reportDbTotalSizeInBytesMu.RUnlock()
- return reportDbTotalSizeInBytes()
- },
- )
- // overridden by mvcc initialization
- reportDbTotalSizeInBytesMu sync.RWMutex
- reportDbTotalSizeInBytes = func() float64 { return 0 }
-
- dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_total_size_in_use_in_bytes",
- Help: "Total size of the underlying database logically in use in bytes.",
- },
- func() float64 {
- reportDbTotalSizeInUseInBytesMu.RLock()
- defer reportDbTotalSizeInUseInBytesMu.RUnlock()
- return reportDbTotalSizeInUseInBytes()
- },
- )
- // overridden by mvcc initialization
- reportDbTotalSizeInUseInBytesMu sync.RWMutex
- reportDbTotalSizeInUseInBytes func() float64 = func() float64 { return 0 }
-
- hashDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "hash_duration_seconds",
- Help: "The latency distribution of storage hash operation.",
-
- // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
- })
-
- hashRevDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "hash_rev_duration_seconds",
- Help: "The latency distribution of storage hash by revision operation.",
-
- // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
- })
-)
-
-func init() {
- prometheus.MustRegister(rangeCounter)
- prometheus.MustRegister(putCounter)
- prometheus.MustRegister(deleteCounter)
- prometheus.MustRegister(txnCounter)
- prometheus.MustRegister(keysGauge)
- prometheus.MustRegister(watchStreamGauge)
- prometheus.MustRegister(watcherGauge)
- prometheus.MustRegister(slowWatcherGauge)
- prometheus.MustRegister(totalEventsCounter)
- prometheus.MustRegister(pendingEventsGauge)
- prometheus.MustRegister(indexCompactionPauseDurations)
- prometheus.MustRegister(dbCompactionPauseDurations)
- prometheus.MustRegister(dbCompactionTotalDurations)
- prometheus.MustRegister(dbCompactionKeysCounter)
- prometheus.MustRegister(dbTotalSizeDebugging)
- prometheus.MustRegister(dbTotalSize)
- prometheus.MustRegister(dbTotalSizeInUse)
- prometheus.MustRegister(hashDurations)
- prometheus.MustRegister(hashRevDurations)
-}
-
-// ReportEventReceived reports that an event is received.
-// This function should be called when the external systems received an
-// event from mvcc.Watcher.
-func ReportEventReceived(n int) {
- pendingEventsGauge.Sub(float64(n))
- totalEventsCounter.Add(float64(n))
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
deleted file mode 100644
index 911d648..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
-)
-
-type metricsTxnWrite struct {
- TxnWrite
- ranges uint
- puts uint
- deletes uint
-}
-
-func newMetricsTxnRead(tr TxnRead) TxnRead {
- return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0}
-}
-
-func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
- return &metricsTxnWrite{tw, 0, 0, 0}
-}
-
-func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) {
- tw.ranges++
- return tw.TxnWrite.Range(key, end, ro)
-}
-
-func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {
- tw.deletes++
- return tw.TxnWrite.DeleteRange(key, end)
-}
-
-func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
- tw.puts++
- return tw.TxnWrite.Put(key, value, lease)
-}
-
-func (tw *metricsTxnWrite) End() {
- defer tw.TxnWrite.End()
- if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 {
- txnCounter.Inc()
- }
- rangeCounter.Add(float64(tw.ranges))
- putCounter.Add(float64(tw.puts))
- deleteCounter.Add(float64(tw.deletes))
-}
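
metrics_txn.go wraps a transaction so every Range/Put/DeleteRange is counted and the counters are flushed when the transaction ends. Here is a small, dependency-free sketch of the same counting-decorator shape; the Txn interface and mapTxn type are simplified stand-ins.

```
package main

import "fmt"

type Txn interface {
	Put(key, value string)
	End()
}

type mapTxn struct{ kv map[string]string }

func (t *mapTxn) Put(k, v string) { t.kv[k] = v }
func (t *mapTxn) End()            {}

// metricsTxn wraps a Txn, counts operations, and reports them when the
// transaction ends, the same shape as metricsTxnWrite above.
type metricsTxn struct {
	Txn
	puts int
}

func (m *metricsTxn) Put(k, v string) { m.puts++; m.Txn.Put(k, v) }
func (m *metricsTxn) End() {
	defer m.Txn.End()
	fmt.Printf("puts in txn: %d\n", m.puts)
}

func main() {
	tx := &metricsTxn{Txn: &mapTxn{kv: map[string]string{}}}
	tx.Put("a", "1")
	tx.Put("b", "2")
	tx.End() // puts in txn: 2
}
```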
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
new file mode 100644
index 0000000..23fe337
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
@@ -0,0 +1,718 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kv.proto
+
+/*
+ Package mvccpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ kv.proto
+
+ It has these top-level messages:
+ KeyValue
+ Event
+*/
+package mvccpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Event_EventType int32
+
+const (
+ PUT Event_EventType = 0
+ DELETE Event_EventType = 1
+)
+
+var Event_EventType_name = map[int32]string{
+ 0: "PUT",
+ 1: "DELETE",
+}
+var Event_EventType_value = map[string]int32{
+ "PUT": 0,
+ "DELETE": 1,
+}
+
+func (x Event_EventType) String() string {
+ return proto.EnumName(Event_EventType_name, int32(x))
+}
+func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
+
+type KeyValue struct {
+ // key is the key in bytes. An empty key is not allowed.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // create_revision is the revision of last creation on this key.
+ CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
+ // mod_revision is the revision of last modification on this key.
+ ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
+ // version is the version of the key. A deletion resets
+ // the version to zero and any modification of the key
+ // increases its version.
+ Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+ // value is the value held by the key, in bytes.
+ Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+ // lease is the ID of the lease that attached to key.
+ // When the attached lease expires, the key will be deleted.
+ // If lease is 0, then no lease is attached to the key.
+ Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
+}
+
+func (m *KeyValue) Reset() { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage() {}
+func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
+
+type Event struct {
+ // type is the kind of event. If type is a PUT, it indicates
+ // new data has been stored to the key. If type is a DELETE,
+ // it indicates the key was deleted.
+ Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
+ // kv holds the KeyValue for the event.
+ // A PUT event contains current kv pair.
+ // A PUT event with kv.Version=1 indicates the creation of a key.
+ // A DELETE/EXPIRE event contains the deleted key with
+ // its modification revision set to the revision of deletion.
+ Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
+ // prev_kv holds the key-value pair before the event happens.
+ PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
+}
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
+
+func init() {
+ proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
+ proto.RegisterType((*Event)(nil), "mvccpb.Event")
+ proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
+}
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if m.CreateRevision != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
+ }
+ if m.ModRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
+ }
+ if m.Version != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Version))
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Lease))
+ }
+ return i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Type != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Type))
+ }
+ if m.Kv != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
+ n1, err := m.Kv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.PrevKv != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
+ n2, err := m.PrevKv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *KeyValue) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.CreateRevision != 0 {
+ n += 1 + sovKv(uint64(m.CreateRevision))
+ }
+ if m.ModRevision != 0 {
+ n += 1 + sovKv(uint64(m.ModRevision))
+ }
+ if m.Version != 0 {
+ n += 1 + sovKv(uint64(m.Version))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovKv(uint64(m.Lease))
+ }
+ return n
+}
+
+func (m *Event) Size() (n int) {
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovKv(uint64(m.Type))
+ }
+ if m.Kv != nil {
+ l = m.Kv.Size()
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.PrevKv != nil {
+ l = m.PrevKv.Size()
+ n += 1 + l + sovKv(uint64(l))
+ }
+ return n
+}
+
+func sovKv(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozKv(x uint64) (n int) {
+ return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+ }
+ m.CreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CreateRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+ }
+ m.ModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ModRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ m.Version = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Version |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipKv(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthKv
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Event: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (Event_EventType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kv == nil {
+ m.Kv = &KeyValue{}
+ }
+ if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrevKv == nil {
+ m.PrevKv = &KeyValue{}
+ }
+ if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipKv(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthKv
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipKv(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthKv
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipKv(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }
+
+var fileDescriptorKv = []byte{
+ // 303 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
+ 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
+ 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
+ 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
+ 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
+ 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
+ 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
+ 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
+ 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
+ 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
+ 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
+ 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
+ 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
+ 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
+ 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
+ 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
+ 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
+ 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
+ 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
+}
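
The generated kv.pb.go above encodes field lengths and integer fields as protobuf varints (see encodeVarintKv and sovKv). A short illustrative sketch of that encoding, re-declared locally:

```
package main

import "fmt"

// putVarint mirrors encodeVarintKv: 7 payload bits per byte, with the
// high bit set on every byte except the last.
func putVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// varintLen mirrors sovKv: the number of bytes putVarint will use.
func varintLen(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	fmt.Printf("% x, %d bytes\n", buf[:n], varintLen(300)) // ac 02, 2 bytes
}
```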
diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go
deleted file mode 100644
index 5fa35a1..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/revision.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import "encoding/binary"
-
-// revBytesLen is the byte length of a normal revision.
-// First 8 bytes is the revision.main in big-endian format. The 9th byte
-// is a '_'. The last 8 bytes is the revision.sub in big-endian format.
-const revBytesLen = 8 + 1 + 8
-
-// A revision indicates modification of the key-value space.
-// The set of changes that share same main revision changes the key-value space atomically.
-type revision struct {
- // main is the main revision of a set of changes that happen atomically.
- main int64
-
- // sub is the the sub revision of a change in a set of changes that happen
- // atomically. Each change has different increasing sub revision in that
- // set.
- sub int64
-}
-
-func (a revision) GreaterThan(b revision) bool {
- if a.main > b.main {
- return true
- }
- if a.main < b.main {
- return false
- }
- return a.sub > b.sub
-}
-
-func newRevBytes() []byte {
- return make([]byte, revBytesLen, markedRevBytesLen)
-}
-
-func revToBytes(rev revision, bytes []byte) {
- binary.BigEndian.PutUint64(bytes, uint64(rev.main))
- bytes[8] = '_'
- binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
-}
-
-func bytesToRev(bytes []byte) revision {
- return revision{
- main: int64(binary.BigEndian.Uint64(bytes[0:8])),
- sub: int64(binary.BigEndian.Uint64(bytes[9:])),
- }
-}
-
-type revisions []revision
-
-func (a revisions) Len() int { return len(a) }
-func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
-func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
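
revision.go defines the 17-byte key layout used throughout the package: 8 big-endian bytes of main revision, an underscore, then 8 big-endian bytes of sub revision. A round-trip sketch of that layout, with the helpers re-declared locally for illustration:

```
package main

import (
	"encoding/binary"
	"fmt"
)

type revision struct{ main, sub int64 }

const revBytesLen = 8 + 1 + 8 // main (8 bytes), '_', sub (8 bytes)

func revToBytes(rev revision, b []byte) {
	binary.BigEndian.PutUint64(b, uint64(rev.main))
	b[8] = '_'
	binary.BigEndian.PutUint64(b[9:], uint64(rev.sub))
}

func bytesToRev(b []byte) revision {
	return revision{
		main: int64(binary.BigEndian.Uint64(b[0:8])),
		sub:  int64(binary.BigEndian.Uint64(b[9:])),
	}
}

func main() {
	b := make([]byte, revBytesLen)
	revToBytes(revision{main: 42, sub: 3}, b)
	fmt.Printf("%+v\n", bytesToRev(b)) // {main:42 sub:3}
}
```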
diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go
deleted file mode 100644
index 8a0df0b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/util.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "encoding/binary"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-func UpdateConsistentIndex(be backend.Backend, index uint64) {
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- var oldi uint64
- _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
- if len(vs) != 0 {
- oldi = binary.BigEndian.Uint64(vs[0])
- }
-
- if index <= oldi {
- return
- }
-
- bs := make([]byte, 8)
- binary.BigEndian.PutUint64(bs, index)
- tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
-}
-
-func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
- ibytes := newRevBytes()
- revToBytes(revision{main: kv.ModRevision}, ibytes)
-
- d, err := kv.Marshal()
- if err != nil {
- plog.Fatalf("cannot marshal event: %v", err)
- }
-
- be.BatchTx().Lock()
- be.BatchTx().UnsafePut(keyBucketName, ibytes, d)
- be.BatchTx().Unlock()
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
deleted file mode 100644
index 78df193..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
+++ /dev/null
@@ -1,534 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sync"
- "time"
-
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-// non-const so modifiable by tests
-var (
- // chanBufLen is the length of the buffered chan
- // for sending out watched events.
- // TODO: find a good buf value. 1024 is just a random one that
- // seems to be reasonable.
- chanBufLen = 1024
-
- // maxWatchersPerSync is the number of watchers to sync in a single batch
- maxWatchersPerSync = 512
-)
-
-type watchable interface {
- watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
- progress(w *watcher)
- rev() int64
-}
-
-type watchableStore struct {
- *store
-
- // mu protects watcher groups and batches. It should never be locked
- // before locking store.mu to avoid deadlock.
- mu sync.RWMutex
-
- // victims are watcher batches that were blocked on the watch channel
- victims []watcherBatch
- victimc chan struct{}
-
- // contains all unsynced watchers that needs to sync with events that have happened
- unsynced watcherGroup
-
- // contains all synced watchers that are in sync with the progress of the store.
- // The key of the map is the key that the watcher watches on.
- synced watcherGroup
-
- stopc chan struct{}
- wg sync.WaitGroup
-}
-
-// cancelFunc updates unsynced and synced maps when running
-// cancel operations.
-type cancelFunc func()
-
-func New(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) ConsistentWatchableKV {
- return newWatchableStore(b, le, ig)
-}
-
-func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *watchableStore {
- s := &watchableStore{
- store: NewStore(b, le, ig),
- victimc: make(chan struct{}, 1),
- unsynced: newWatcherGroup(),
- synced: newWatcherGroup(),
- stopc: make(chan struct{}),
- }
- s.store.ReadView = &readView{s}
- s.store.WriteView = &writeView{s}
- if s.le != nil {
- // use this store as the deleter so revokes trigger watch events
- s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
- }
- s.wg.Add(2)
- go s.syncWatchersLoop()
- go s.syncVictimsLoop()
- return s
-}
-
-func (s *watchableStore) Close() error {
- close(s.stopc)
- s.wg.Wait()
- return s.store.Close()
-}
-
-func (s *watchableStore) NewWatchStream() WatchStream {
- watchStreamGauge.Inc()
- return &watchStream{
- watchable: s,
- ch: make(chan WatchResponse, chanBufLen),
- cancels: make(map[WatchID]cancelFunc),
- watchers: make(map[WatchID]*watcher),
- }
-}
-
-func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
- wa := &watcher{
- key: key,
- end: end,
- minRev: startRev,
- id: id,
- ch: ch,
- fcs: fcs,
- }
-
- s.mu.Lock()
- s.revMu.RLock()
- synced := startRev > s.store.currentRev || startRev == 0
- if synced {
- wa.minRev = s.store.currentRev + 1
- if startRev > wa.minRev {
- wa.minRev = startRev
- }
- }
- if synced {
- s.synced.add(wa)
- } else {
- slowWatcherGauge.Inc()
- s.unsynced.add(wa)
- }
- s.revMu.RUnlock()
- s.mu.Unlock()
-
- watcherGauge.Inc()
-
- return wa, func() { s.cancelWatcher(wa) }
-}
-
-// cancelWatcher removes references of the watcher from the watchableStore
-func (s *watchableStore) cancelWatcher(wa *watcher) {
- for {
- s.mu.Lock()
- if s.unsynced.delete(wa) {
- slowWatcherGauge.Dec()
- break
- } else if s.synced.delete(wa) {
- break
- } else if wa.compacted {
- break
- } else if wa.ch == nil {
- // already canceled (e.g., cancel/close race)
- break
- }
-
- if !wa.victim {
- panic("watcher not victim but not in watch groups")
- }
-
- var victimBatch watcherBatch
- for _, wb := range s.victims {
- if wb[wa] != nil {
- victimBatch = wb
- break
- }
- }
- if victimBatch != nil {
- slowWatcherGauge.Dec()
- delete(victimBatch, wa)
- break
- }
-
- // victim being processed so not accessible; retry
- s.mu.Unlock()
- time.Sleep(time.Millisecond)
- }
-
- watcherGauge.Dec()
- wa.ch = nil
- s.mu.Unlock()
-}
-
-func (s *watchableStore) Restore(b backend.Backend) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- err := s.store.Restore(b)
- if err != nil {
- return err
- }
-
- for wa := range s.synced.watchers {
- wa.restore = true
- s.unsynced.add(wa)
- }
- s.synced = newWatcherGroup()
- return nil
-}
-
-// syncWatchersLoop syncs the watcher in the unsynced map every 100ms.
-func (s *watchableStore) syncWatchersLoop() {
- defer s.wg.Done()
-
- for {
- s.mu.RLock()
- st := time.Now()
- lastUnsyncedWatchers := s.unsynced.size()
- s.mu.RUnlock()
-
- unsyncedWatchers := 0
- if lastUnsyncedWatchers > 0 {
- unsyncedWatchers = s.syncWatchers()
- }
- syncDuration := time.Since(st)
-
- waitDuration := 100 * time.Millisecond
- // more work pending?
- if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
- // be fair to other store operations by yielding time taken
- waitDuration = syncDuration
- }
-
- select {
- case <-time.After(waitDuration):
- case <-s.stopc:
- return
- }
- }
-}
-
-// syncVictimsLoop tries to write precomputed watcher responses to
-// watchers that had a blocked watcher channel
-func (s *watchableStore) syncVictimsLoop() {
- defer s.wg.Done()
-
- for {
- for s.moveVictims() != 0 {
- // try to update all victim watchers
- }
- s.mu.RLock()
- isEmpty := len(s.victims) == 0
- s.mu.RUnlock()
-
- var tickc <-chan time.Time
- if !isEmpty {
- tickc = time.After(10 * time.Millisecond)
- }
-
- select {
- case <-tickc:
- case <-s.victimc:
- case <-s.stopc:
- return
- }
- }
-}
-
-// moveVictims tries to update watches with already pending event data
-func (s *watchableStore) moveVictims() (moved int) {
- s.mu.Lock()
- victims := s.victims
- s.victims = nil
- s.mu.Unlock()
-
- var newVictim watcherBatch
- for _, wb := range victims {
- // try to send responses again
- for w, eb := range wb {
- // watcher has observed the store up to, but not including, w.minRev
- rev := w.minRev - 1
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- if newVictim == nil {
- newVictim = make(watcherBatch)
- }
- newVictim[w] = eb
- continue
- }
- moved++
- }
-
- // assign completed victim watchers to unsync/sync
- s.mu.Lock()
- s.store.revMu.RLock()
- curRev := s.store.currentRev
- for w, eb := range wb {
- if newVictim != nil && newVictim[w] != nil {
- // couldn't send watch response; stays victim
- continue
- }
- w.victim = false
- if eb.moreRev != 0 {
- w.minRev = eb.moreRev
- }
- if w.minRev <= curRev {
- s.unsynced.add(w)
- } else {
- slowWatcherGauge.Dec()
- s.synced.add(w)
- }
- }
- s.store.revMu.RUnlock()
- s.mu.Unlock()
- }
-
- if len(newVictim) > 0 {
- s.mu.Lock()
- s.victims = append(s.victims, newVictim)
- s.mu.Unlock()
- }
-
- return moved
-}
-
-// syncWatchers syncs unsynced watchers by:
-// 1. choose a set of watchers from the unsynced watcher group
-// 2. iterate over the set to get the minimum revision and remove compacted watchers
-// 3. use minimum revision to get all key-value pairs and send those events to watchers
-// 4. remove synced watchers in set from unsynced group and move to synced group
-func (s *watchableStore) syncWatchers() int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.unsynced.size() == 0 {
- return 0
- }
-
- s.store.revMu.RLock()
- defer s.store.revMu.RUnlock()
-
- // in order to find key-value pairs from unsynced watchers, we need to
- // find min revision index, and these revisions can be used to
- // query the backend store of key-value pairs
- curRev := s.store.currentRev
- compactionRev := s.store.compactMainRev
-
- wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
- minBytes, maxBytes := newRevBytes(), newRevBytes()
- revToBytes(revision{main: minRev}, minBytes)
- revToBytes(revision{main: curRev + 1}, maxBytes)
-
- // UnsafeRange returns keys and values. And in boltdb, keys are revisions.
- // values are actual key-value pairs in backend.
- tx := s.store.b.ReadTx()
- tx.Lock()
- revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
- evs := kvsToEvents(wg, revs, vs)
- tx.Unlock()
-
- var victims watcherBatch
- wb := newWatcherBatch(wg, evs)
- for w := range wg.watchers {
- w.minRev = curRev + 1
-
- eb, ok := wb[w]
- if !ok {
- // bring un-notified watcher to synced
- s.synced.add(w)
- s.unsynced.delete(w)
- continue
- }
-
- if eb.moreRev != 0 {
- w.minRev = eb.moreRev
- }
-
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- if victims == nil {
- victims = make(watcherBatch)
- }
- w.victim = true
- }
-
- if w.victim {
- victims[w] = eb
- } else {
- if eb.moreRev != 0 {
- // stay unsynced; more to read
- continue
- }
- s.synced.add(w)
- }
- s.unsynced.delete(w)
- }
- s.addVictim(victims)
-
- vsz := 0
- for _, v := range s.victims {
- vsz += len(v)
- }
- slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
-
- return s.unsynced.size()
-}
-
-// kvsToEvents gets all events for the watchers from all key-value pairs
-func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
- for i, v := range vals {
- var kv mvccpb.KeyValue
- if err := kv.Unmarshal(v); err != nil {
- plog.Panicf("cannot unmarshal event: %v", err)
- }
-
- if !wg.contains(string(kv.Key)) {
- continue
- }
-
- ty := mvccpb.PUT
- if isTombstone(revs[i]) {
- ty = mvccpb.DELETE
- // patch in mod revision so watchers won't skip
- kv.ModRevision = bytesToRev(revs[i]).main
- }
- evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
- }
- return evs
-}
-
-// notify notifies the fact that given event at the given rev just happened to
-// watchers that watch on the key of the event.
-func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
- var victim watcherBatch
- for w, eb := range newWatcherBatch(&s.synced, evs) {
- if eb.revs != 1 {
- plog.Panicf("unexpected multiple revisions in notification")
- }
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- // move slow watcher to victims
- w.minRev = rev + 1
- if victim == nil {
- victim = make(watcherBatch)
- }
- w.victim = true
- victim[w] = eb
- s.synced.delete(w)
- slowWatcherGauge.Inc()
- }
- }
- s.addVictim(victim)
-}
-
-func (s *watchableStore) addVictim(victim watcherBatch) {
- if victim == nil {
- return
- }
- s.victims = append(s.victims, victim)
- select {
- case s.victimc <- struct{}{}:
- default:
- }
-}
-
-func (s *watchableStore) rev() int64 { return s.store.Rev() }
-
-func (s *watchableStore) progress(w *watcher) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- if _, ok := s.synced.watchers[w]; ok {
- w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
- // If the ch is full, this watcher is receiving events.
- // We do not need to send progress at all.
- }
-}
-
-type watcher struct {
- // the watcher key
- key []byte
- // end indicates the end of the range to watch.
- // If end is set, the watcher is on a range.
- end []byte
-
- // victim is set when ch is blocked and undergoing victim processing
- victim bool
-
- // compacted is set when the watcher is removed because of compaction
- compacted bool
-
- // restore is true when the watcher is being restored from leader snapshot
- // which means that this watcher has just been moved from "synced" to "unsynced"
- // watcher group, possibly with a future revision when it was first added
- // to the synced watcher
- // "unsynced" watcher revision must always be <= current revision,
- // except when the watcher were to be moved from "synced" watcher group
- restore bool
-
- // minRev is the minimum revision update the watcher will accept
- minRev int64
- id WatchID
-
- fcs []FilterFunc
- // a chan to send out the watch response.
- // The chan might be shared with other watchers.
- ch chan<- WatchResponse
-}
-
-func (w *watcher) send(wr WatchResponse) bool {
- progressEvent := len(wr.Events) == 0
-
- if len(w.fcs) != 0 {
- ne := make([]mvccpb.Event, 0, len(wr.Events))
- for i := range wr.Events {
- filtered := false
- for _, filter := range w.fcs {
- if filter(wr.Events[i]) {
- filtered = true
- break
- }
- }
- if !filtered {
- ne = append(ne, wr.Events[i])
- }
- }
- wr.Events = ne
- }
-
- // if all events are filtered out, we should send nothing.
- if !progressEvent && len(wr.Events) == 0 {
- return true
- }
- select {
- case w.ch <- wr:
- return true
- default:
- return false
- }
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
deleted file mode 100644
index 5c5bfda..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-func (tw *watchableStoreTxnWrite) End() {
- changes := tw.Changes()
- if len(changes) == 0 {
- tw.TxnWrite.End()
- return
- }
-
- rev := tw.Rev() + 1
- evs := make([]mvccpb.Event, len(changes))
- for i, change := range changes {
- evs[i].Kv = &changes[i]
- if change.CreateRevision == 0 {
- evs[i].Type = mvccpb.DELETE
- evs[i].Kv.ModRevision = rev
- } else {
- evs[i].Type = mvccpb.PUT
- }
- }
-
- // end write txn under watchable store lock so the updates are visible
- // when asynchronous event posting checks the current store revision
- tw.s.mu.Lock()
- tw.s.notify(rev, evs)
- tw.TxnWrite.End()
- tw.s.mu.Unlock()
-}
-
-type watchableStoreTxnWrite struct {
- TxnWrite
- s *watchableStore
-}
-
-func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} }
diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go
deleted file mode 100644
index bc0c632..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watcher.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "errors"
- "sync"
-
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-var (
- ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
-)
-
-type WatchID int64
-
-// FilterFunc returns true if the given event should be filtered out.
-type FilterFunc func(e mvccpb.Event) bool
-
-type WatchStream interface {
- // Watch creates a watcher. The watcher watches the events happening or
- // happened on the given key or range [key, end) from the given startRev.
- //
- // The whole event history can be watched unless compacted.
- // If `startRev` <=0, watch observes events after currentRev.
- //
- // The returned `id` is the ID of this watcher. It appears as WatchID
- // in events that are sent to the created watcher through stream channel.
- //
- Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID
-
- // Chan returns a chan. All watch response will be sent to the returned chan.
- Chan() <-chan WatchResponse
-
- // RequestProgress requests the progress of the watcher with given ID. The response
- // will only be sent if the watcher is currently synced.
- // The responses will be sent through the WatchRespone Chan attached
- // with this stream to ensure correct ordering.
- // The responses contains no events. The revision in the response is the progress
- // of the watchers since the watcher is currently synced.
- RequestProgress(id WatchID)
-
- // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be
- // returned.
- Cancel(id WatchID) error
-
- // Close closes Chan and release all related resources.
- Close()
-
- // Rev returns the current revision of the KV the stream watches on.
- Rev() int64
-}
-
-type WatchResponse struct {
- // WatchID is the WatchID of the watcher this response sent to.
- WatchID WatchID
-
- // Events contains all the events that needs to send.
- Events []mvccpb.Event
-
- // Revision is the revision of the KV when the watchResponse is created.
- // For a normal response, the revision should be the same as the last
- // modified revision inside Events. For a delayed response to a unsynced
- // watcher, the revision is greater than the last modified revision
- // inside Events.
- Revision int64
-
- // CompactRevision is set when the watcher is cancelled due to compaction.
- CompactRevision int64
-}
-
-// watchStream contains a collection of watchers that share
-// one streaming chan to send out watched events and other control events.
-type watchStream struct {
- watchable watchable
- ch chan WatchResponse
-
- mu sync.Mutex // guards fields below it
- // nextID is the ID pre-allocated for next new watcher in this stream
- nextID WatchID
- closed bool
- cancels map[WatchID]cancelFunc
- watchers map[WatchID]*watcher
-}
-
-// Watch creates a new watcher in the stream and returns its WatchID.
-// TODO: return error if ws is closed?
-func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID {
- // prevent wrong range where key >= end lexicographically
- // watch request with 'WithFromKey' has empty-byte range end
- if len(end) != 0 && bytes.Compare(key, end) != -1 {
- return -1
- }
-
- ws.mu.Lock()
- defer ws.mu.Unlock()
- if ws.closed {
- return -1
- }
-
- id := ws.nextID
- ws.nextID++
-
- w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
-
- ws.cancels[id] = c
- ws.watchers[id] = w
- return id
-}
-
-func (ws *watchStream) Chan() <-chan WatchResponse {
- return ws.ch
-}
-
-func (ws *watchStream) Cancel(id WatchID) error {
- ws.mu.Lock()
- cancel, ok := ws.cancels[id]
- w := ws.watchers[id]
- ok = ok && !ws.closed
- ws.mu.Unlock()
-
- if !ok {
- return ErrWatcherNotExist
- }
- cancel()
-
- ws.mu.Lock()
- // The watch isn't removed until cancel so that if Close() is called,
- // it will wait for the cancel. Otherwise, Close() could close the
- // watch channel while the store is still posting events.
- if ww := ws.watchers[id]; ww == w {
- delete(ws.cancels, id)
- delete(ws.watchers, id)
- }
- ws.mu.Unlock()
-
- return nil
-}
-
-func (ws *watchStream) Close() {
- ws.mu.Lock()
- defer ws.mu.Unlock()
-
- for _, cancel := range ws.cancels {
- cancel()
- }
- ws.closed = true
- close(ws.ch)
- watchStreamGauge.Dec()
-}
-
-func (ws *watchStream) Rev() int64 {
- ws.mu.Lock()
- defer ws.mu.Unlock()
- return ws.watchable.rev()
-}
-
-func (ws *watchStream) RequestProgress(id WatchID) {
- ws.mu.Lock()
- w, ok := ws.watchers[id]
- ws.mu.Unlock()
- if !ok {
- return
- }
- ws.watchable.progress(w)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go
deleted file mode 100644
index b65c7bc..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "fmt"
- "math"
-
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/adt"
-)
-
-var (
- // watchBatchMaxRevs is the maximum distinct revisions that
- // may be sent to an unsynced watcher at a time. Declared as
- // var instead of const for testing purposes.
- watchBatchMaxRevs = 1000
-)
-
-type eventBatch struct {
- // evs is a batch of revision-ordered events
- evs []mvccpb.Event
- // revs is the minimum unique revisions observed for this batch
- revs int
- // moreRev is first revision with more events following this batch
- moreRev int64
-}
-
-func (eb *eventBatch) add(ev mvccpb.Event) {
- if eb.revs > watchBatchMaxRevs {
- // maxed out batch size
- return
- }
-
- if len(eb.evs) == 0 {
- // base case
- eb.revs = 1
- eb.evs = append(eb.evs, ev)
- return
- }
-
- // revision accounting
- ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
- evRev := ev.Kv.ModRevision
- if evRev > ebRev {
- eb.revs++
- if eb.revs > watchBatchMaxRevs {
- eb.moreRev = evRev
- return
- }
- }
-
- eb.evs = append(eb.evs, ev)
-}
-
-type watcherBatch map[*watcher]*eventBatch
-
-func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
- eb := wb[w]
- if eb == nil {
- eb = &eventBatch{}
- wb[w] = eb
- }
- eb.add(ev)
-}
-
-// newWatcherBatch maps watchers to their matched events. It enables quick
-// events look up by watcher.
-func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
- if len(wg.watchers) == 0 {
- return nil
- }
-
- wb := make(watcherBatch)
- for _, ev := range evs {
- for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
- if ev.Kv.ModRevision >= w.minRev {
- // don't double notify
- wb.add(w, ev)
- }
- }
- }
- return wb
-}
-
-type watcherSet map[*watcher]struct{}
-
-func (w watcherSet) add(wa *watcher) {
- if _, ok := w[wa]; ok {
- panic("add watcher twice!")
- }
- w[wa] = struct{}{}
-}
-
-func (w watcherSet) union(ws watcherSet) {
- for wa := range ws {
- w.add(wa)
- }
-}
-
-func (w watcherSet) delete(wa *watcher) {
- if _, ok := w[wa]; !ok {
- panic("removing missing watcher!")
- }
- delete(w, wa)
-}
-
-type watcherSetByKey map[string]watcherSet
-
-func (w watcherSetByKey) add(wa *watcher) {
- set := w[string(wa.key)]
- if set == nil {
- set = make(watcherSet)
- w[string(wa.key)] = set
- }
- set.add(wa)
-}
-
-func (w watcherSetByKey) delete(wa *watcher) bool {
- k := string(wa.key)
- if v, ok := w[k]; ok {
- if _, ok := v[wa]; ok {
- delete(v, wa)
- if len(v) == 0 {
- // remove the set; nothing left
- delete(w, k)
- }
- return true
- }
- }
- return false
-}
-
-// watcherGroup is a collection of watchers organized by their ranges
-type watcherGroup struct {
- // keyWatchers has the watchers that watch on a single key
- keyWatchers watcherSetByKey
- // ranges has the watchers that watch a range; it is sorted by interval
- ranges adt.IntervalTree
- // watchers is the set of all watchers
- watchers watcherSet
-}
-
-func newWatcherGroup() watcherGroup {
- return watcherGroup{
- keyWatchers: make(watcherSetByKey),
- watchers: make(watcherSet),
- }
-}
-
-// add puts a watcher in the group.
-func (wg *watcherGroup) add(wa *watcher) {
- wg.watchers.add(wa)
- if wa.end == nil {
- wg.keyWatchers.add(wa)
- return
- }
-
- // interval already registered?
- ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
- if iv := wg.ranges.Find(ivl); iv != nil {
- iv.Val.(watcherSet).add(wa)
- return
- }
-
- // not registered, put in interval tree
- ws := make(watcherSet)
- ws.add(wa)
- wg.ranges.Insert(ivl, ws)
-}
-
-// contains is whether the given key has a watcher in the group.
-func (wg *watcherGroup) contains(key string) bool {
- _, ok := wg.keyWatchers[key]
- return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key))
-}
-
-// size gives the number of unique watchers in the group.
-func (wg *watcherGroup) size() int { return len(wg.watchers) }
-
-// delete removes a watcher from the group.
-func (wg *watcherGroup) delete(wa *watcher) bool {
- if _, ok := wg.watchers[wa]; !ok {
- return false
- }
- wg.watchers.delete(wa)
- if wa.end == nil {
- wg.keyWatchers.delete(wa)
- return true
- }
-
- ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
- iv := wg.ranges.Find(ivl)
- if iv == nil {
- return false
- }
-
- ws := iv.Val.(watcherSet)
- delete(ws, wa)
- if len(ws) == 0 {
- // remove interval missing watchers
- if ok := wg.ranges.Delete(ivl); !ok {
- panic("could not remove watcher from interval tree")
- }
- }
-
- return true
-}
-
-// choose selects watchers from the watcher group to update
-func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
- if len(wg.watchers) < maxWatchers {
- return wg, wg.chooseAll(curRev, compactRev)
- }
- ret := newWatcherGroup()
- for w := range wg.watchers {
- if maxWatchers <= 0 {
- break
- }
- maxWatchers--
- ret.add(w)
- }
- return &ret, ret.chooseAll(curRev, compactRev)
-}
-
-func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
- minRev := int64(math.MaxInt64)
- for w := range wg.watchers {
- if w.minRev > curRev {
- // after network partition, possibly choosing future revision watcher from restore operation
- // with watch key "proxy-namespace__lostleader" and revision "math.MaxInt64 - 2"
- // do not panic when such watcher had been moved from "synced" watcher during restore operation
- if !w.restore {
- panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
- }
-
- // mark 'restore' done, since it's chosen
- w.restore = false
- }
- if w.minRev < compactRev {
- select {
- case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}:
- w.compacted = true
- wg.delete(w)
- default:
- // retry next time
- }
- continue
- }
- if minRev > w.minRev {
- minRev = w.minRev
- }
- }
- return minRev
-}
-
-// watcherSetByKey gets the set of watchers that receive events on the given key.
-func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
- wkeys := wg.keyWatchers[key]
- wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
-
- // zero-copy cases
- switch {
- case len(wranges) == 0:
- // no need to merge ranges or copy; reuse single-key set
- return wkeys
- case len(wranges) == 0 && len(wkeys) == 0:
- return nil
- case len(wranges) == 1 && len(wkeys) == 0:
- return wranges[0].Val.(watcherSet)
- }
-
- // copy case
- ret := make(watcherSet)
- ret.union(wg.keyWatchers[key])
- for _, item := range wranges {
- ret.union(item.Val.(watcherSet))
- }
- return ret
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/doc.go b/vendor/github.com/coreos/etcd/pkg/adt/doc.go
deleted file mode 100644
index 1a95591..0000000
--- a/vendor/github.com/coreos/etcd/pkg/adt/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package adt implements useful abstract data types.
-package adt
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
deleted file mode 100644
index ec302e4..0000000
--- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
+++ /dev/null
@@ -1,599 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adt
-
-import (
- "bytes"
- "math"
-)
-
-// Comparable is an interface for trichotomic comparisons.
-type Comparable interface {
- // Compare gives the result of a 3-way comparison
- // a.Compare(b) = 1 => a > b
- // a.Compare(b) = 0 => a == b
- // a.Compare(b) = -1 => a < b
- Compare(c Comparable) int
-}
-
-type rbcolor int
-
-const (
- black rbcolor = iota
- red
-)
-
-// Interval implements a Comparable interval [begin, end)
-// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
-type Interval struct {
- Begin Comparable
- End Comparable
-}
-
-// Compare on an interval gives == if the interval overlaps.
-func (ivl *Interval) Compare(c Comparable) int {
- ivl2 := c.(*Interval)
- ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
- ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
- iveCmpBegin := ivl.End.Compare(ivl2.Begin)
-
- // ivl is left of ivl2
- if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
- return -1
- }
-
- // iv is right of iv2
- if ivbCmpEnd >= 0 {
- return 1
- }
-
- return 0
-}
-
-type intervalNode struct {
- // iv is the interval-value pair entry.
- iv IntervalValue
- // max endpoint of all descendent nodes.
- max Comparable
- // left and right are sorted by low endpoint of key interval
- left, right *intervalNode
- // parent is the direct ancestor of the node
- parent *intervalNode
- c rbcolor
-}
-
-func (x *intervalNode) color() rbcolor {
- if x == nil {
- return black
- }
- return x.c
-}
-
-func (n *intervalNode) height() int {
- if n == nil {
- return 0
- }
- ld := n.left.height()
- rd := n.right.height()
- if ld < rd {
- return rd + 1
- }
- return ld + 1
-}
-
-func (x *intervalNode) min() *intervalNode {
- for x.left != nil {
- x = x.left
- }
- return x
-}
-
-// successor is the next in-order node in the tree
-func (x *intervalNode) successor() *intervalNode {
- if x.right != nil {
- return x.right.min()
- }
- y := x.parent
- for y != nil && x == y.right {
- x = y
- y = y.parent
- }
- return y
-}
-
-// updateMax updates the maximum values for a node and its ancestors
-func (x *intervalNode) updateMax() {
- for x != nil {
- oldmax := x.max
- max := x.iv.Ivl.End
- if x.left != nil && x.left.max.Compare(max) > 0 {
- max = x.left.max
- }
- if x.right != nil && x.right.max.Compare(max) > 0 {
- max = x.right.max
- }
- if oldmax.Compare(max) == 0 {
- break
- }
- x.max = max
- x = x.parent
- }
-}
-
-type nodeVisitor func(n *intervalNode) bool
-
-// visit will call a node visitor on each node that overlaps the given interval
-func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool {
- if x == nil {
- return true
- }
- v := iv.Compare(&x.iv.Ivl)
- switch {
- case v < 0:
- if !x.left.visit(iv, nv) {
- return false
- }
- case v > 0:
- maxiv := Interval{x.iv.Ivl.Begin, x.max}
- if maxiv.Compare(iv) == 0 {
- if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) {
- return false
- }
- }
- default:
- if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) {
- return false
- }
- }
- return true
-}
-
-type IntervalValue struct {
- Ivl Interval
- Val interface{}
-}
-
-// IntervalTree represents a (mostly) textbook implementation of the
-// "Introduction to Algorithms" (Cormen et al, 2nd ed.) chapter 13 red-black tree
-// and chapter 14.3 interval tree with search supporting "stabbing queries".
-type IntervalTree struct {
- root *intervalNode
- count int
-}
-
-// Delete removes the node with the given interval from the tree, returning
-// true if a node is in fact removed.
-func (ivt *IntervalTree) Delete(ivl Interval) bool {
- z := ivt.find(ivl)
- if z == nil {
- return false
- }
-
- y := z
- if z.left != nil && z.right != nil {
- y = z.successor()
- }
-
- x := y.left
- if x == nil {
- x = y.right
- }
- if x != nil {
- x.parent = y.parent
- }
-
- if y.parent == nil {
- ivt.root = x
- } else {
- if y == y.parent.left {
- y.parent.left = x
- } else {
- y.parent.right = x
- }
- y.parent.updateMax()
- }
- if y != z {
- z.iv = y.iv
- z.updateMax()
- }
-
- if y.color() == black && x != nil {
- ivt.deleteFixup(x)
- }
-
- ivt.count--
- return true
-}
-
-func (ivt *IntervalTree) deleteFixup(x *intervalNode) {
- for x != ivt.root && x.color() == black && x.parent != nil {
- if x == x.parent.left {
- w := x.parent.right
- if w.color() == red {
- w.c = black
- x.parent.c = red
- ivt.rotateLeft(x.parent)
- w = x.parent.right
- }
- if w == nil {
- break
- }
- if w.left.color() == black && w.right.color() == black {
- w.c = red
- x = x.parent
- } else {
- if w.right.color() == black {
- w.left.c = black
- w.c = red
- ivt.rotateRight(w)
- w = x.parent.right
- }
- w.c = x.parent.color()
- x.parent.c = black
- w.right.c = black
- ivt.rotateLeft(x.parent)
- x = ivt.root
- }
- } else {
- // same as above but with left and right exchanged
- w := x.parent.left
- if w.color() == red {
- w.c = black
- x.parent.c = red
- ivt.rotateRight(x.parent)
- w = x.parent.left
- }
- if w == nil {
- break
- }
- if w.left.color() == black && w.right.color() == black {
- w.c = red
- x = x.parent
- } else {
- if w.left.color() == black {
- w.right.c = black
- w.c = red
- ivt.rotateLeft(w)
- w = x.parent.left
- }
- w.c = x.parent.color()
- x.parent.c = black
- w.left.c = black
- ivt.rotateRight(x.parent)
- x = ivt.root
- }
- }
- }
- if x != nil {
- x.c = black
- }
-}
-
-// Insert adds a node with the given interval into the tree.
-func (ivt *IntervalTree) Insert(ivl Interval, val interface{}) {
- var y *intervalNode
- z := &intervalNode{iv: IntervalValue{ivl, val}, max: ivl.End, c: red}
- x := ivt.root
- for x != nil {
- y = x
- if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
- x = x.left
- } else {
- x = x.right
- }
- }
-
- z.parent = y
- if y == nil {
- ivt.root = z
- } else {
- if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
- y.left = z
- } else {
- y.right = z
- }
- y.updateMax()
- }
- z.c = red
- ivt.insertFixup(z)
- ivt.count++
-}
-
-func (ivt *IntervalTree) insertFixup(z *intervalNode) {
- for z.parent != nil && z.parent.parent != nil && z.parent.color() == red {
- if z.parent == z.parent.parent.left {
- y := z.parent.parent.right
- if y.color() == red {
- y.c = black
- z.parent.c = black
- z.parent.parent.c = red
- z = z.parent.parent
- } else {
- if z == z.parent.right {
- z = z.parent
- ivt.rotateLeft(z)
- }
- z.parent.c = black
- z.parent.parent.c = red
- ivt.rotateRight(z.parent.parent)
- }
- } else {
- // same as then with left/right exchanged
- y := z.parent.parent.left
- if y.color() == red {
- y.c = black
- z.parent.c = black
- z.parent.parent.c = red
- z = z.parent.parent
- } else {
- if z == z.parent.left {
- z = z.parent
- ivt.rotateRight(z)
- }
- z.parent.c = black
- z.parent.parent.c = red
- ivt.rotateLeft(z.parent.parent)
- }
- }
- }
- ivt.root.c = black
-}
-
-// rotateLeft moves x so it is left of its right child
-func (ivt *IntervalTree) rotateLeft(x *intervalNode) {
- y := x.right
- x.right = y.left
- if y.left != nil {
- y.left.parent = x
- }
- x.updateMax()
- ivt.replaceParent(x, y)
- y.left = x
- y.updateMax()
-}
-
-// rotateLeft moves x so it is right of its left child
-func (ivt *IntervalTree) rotateRight(x *intervalNode) {
- if x == nil {
- return
- }
- y := x.left
- x.left = y.right
- if y.right != nil {
- y.right.parent = x
- }
- x.updateMax()
- ivt.replaceParent(x, y)
- y.right = x
- y.updateMax()
-}
-
-// replaceParent replaces x's parent with y
-func (ivt *IntervalTree) replaceParent(x *intervalNode, y *intervalNode) {
- y.parent = x.parent
- if x.parent == nil {
- ivt.root = y
- } else {
- if x == x.parent.left {
- x.parent.left = y
- } else {
- x.parent.right = y
- }
- x.parent.updateMax()
- }
- x.parent = y
-}
-
-// Len gives the number of elements in the tree
-func (ivt *IntervalTree) Len() int { return ivt.count }
-
-// Height is the number of levels in the tree; one node has height 1.
-func (ivt *IntervalTree) Height() int { return ivt.root.height() }
-
-// MaxHeight is the expected maximum tree height given the number of nodes
-func (ivt *IntervalTree) MaxHeight() int {
- return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
-}
-
-// IntervalVisitor is used on tree searches; return false to stop searching.
-type IntervalVisitor func(n *IntervalValue) bool
-
-// Visit calls a visitor function on every tree node intersecting the given interval.
-// It will visit each interval [x, y) in ascending order sorted on x.
-func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
- ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) })
-}
-
-// find the exact node for a given interval
-func (ivt *IntervalTree) find(ivl Interval) (ret *intervalNode) {
- f := func(n *intervalNode) bool {
- if n.iv.Ivl != ivl {
- return true
- }
- ret = n
- return false
- }
- ivt.root.visit(&ivl, f)
- return ret
-}
-
-// Find gets the IntervalValue for the node matching the given interval
-func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) {
- n := ivt.find(ivl)
- if n == nil {
- return nil
- }
- return &n.iv
-}
-
-// Intersects returns true if there is some tree node intersecting the given interval.
-func (ivt *IntervalTree) Intersects(iv Interval) bool {
- x := ivt.root
- for x != nil && iv.Compare(&x.iv.Ivl) != 0 {
- if x.left != nil && x.left.max.Compare(iv.Begin) > 0 {
- x = x.left
- } else {
- x = x.right
- }
- }
- return x != nil
-}
-
-// Contains returns true if the interval tree's keys cover the entire given interval.
-func (ivt *IntervalTree) Contains(ivl Interval) bool {
- var maxEnd, minBegin Comparable
-
- isContiguous := true
- ivt.Visit(ivl, func(n *IntervalValue) bool {
- if minBegin == nil {
- minBegin = n.Ivl.Begin
- maxEnd = n.Ivl.End
- return true
- }
- if maxEnd.Compare(n.Ivl.Begin) < 0 {
- isContiguous = false
- return false
- }
- if n.Ivl.End.Compare(maxEnd) > 0 {
- maxEnd = n.Ivl.End
- }
- return true
- })
-
- return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0
-}
-
-// Stab returns a slice with all elements in the tree intersecting the interval.
-func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
- if ivt.count == 0 {
- return nil
- }
- f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
- ivt.Visit(iv, f)
- return ivs
-}
-
-// Union merges a given interval tree into the receiver.
-func (ivt *IntervalTree) Union(inIvt IntervalTree, ivl Interval) {
- f := func(n *IntervalValue) bool {
- ivt.Insert(n.Ivl, n.Val)
- return true
- }
- inIvt.Visit(ivl, f)
-}
-
-type StringComparable string
-
-func (s StringComparable) Compare(c Comparable) int {
- sc := c.(StringComparable)
- if s < sc {
- return -1
- }
- if s > sc {
- return 1
- }
- return 0
-}
-
-func NewStringInterval(begin, end string) Interval {
- return Interval{StringComparable(begin), StringComparable(end)}
-}
-
-func NewStringPoint(s string) Interval {
- return Interval{StringComparable(s), StringComparable(s + "\x00")}
-}
-
-// StringAffineComparable treats "" as > all other strings
-type StringAffineComparable string
-
-func (s StringAffineComparable) Compare(c Comparable) int {
- sc := c.(StringAffineComparable)
-
- if len(s) == 0 {
- if len(sc) == 0 {
- return 0
- }
- return 1
- }
- if len(sc) == 0 {
- return -1
- }
-
- if s < sc {
- return -1
- }
- if s > sc {
- return 1
- }
- return 0
-}
-
-func NewStringAffineInterval(begin, end string) Interval {
- return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
-}
-func NewStringAffinePoint(s string) Interval {
- return NewStringAffineInterval(s, s+"\x00")
-}
-
-func NewInt64Interval(a int64, b int64) Interval {
- return Interval{Int64Comparable(a), Int64Comparable(b)}
-}
-
-func NewInt64Point(a int64) Interval {
- return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
-}
-
-type Int64Comparable int64
-
-func (v Int64Comparable) Compare(c Comparable) int {
- vc := c.(Int64Comparable)
- cmp := v - vc
- if cmp < 0 {
- return -1
- }
- if cmp > 0 {
- return 1
- }
- return 0
-}
-
-// BytesAffineComparable treats empty byte arrays as > all other byte arrays
-type BytesAffineComparable []byte
-
-func (b BytesAffineComparable) Compare(c Comparable) int {
- bc := c.(BytesAffineComparable)
-
- if len(b) == 0 {
- if len(bc) == 0 {
- return 0
- }
- return 1
- }
- if len(bc) == 0 {
- return -1
- }
-
- return bytes.Compare(b, bc)
-}
-
-func NewBytesAffineInterval(begin, end []byte) Interval {
- return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
-}
-func NewBytesAffinePoint(b []byte) Interval {
- be := make([]byte, len(b)+1)
- copy(be, b)
- be[len(b)] = 0
- return NewBytesAffineInterval(b, be)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/contention/contention.go b/vendor/github.com/coreos/etcd/pkg/contention/contention.go
deleted file mode 100644
index 26ce9a2..0000000
--- a/vendor/github.com/coreos/etcd/pkg/contention/contention.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package contention
-
-import (
- "sync"
- "time"
-)
-
-// TimeoutDetector detects routine starvations by
-// observing the actual time duration to finish an action
-// or between two events that should happen in a fixed
-// interval. If the observed duration is longer than
-// the expectation, the detector will report the result.
-type TimeoutDetector struct {
- mu sync.Mutex // protects all
- maxDuration time.Duration
- // map from event to time
- // time is the last seen time of the event.
- records map[uint64]time.Time
-}
-
-// NewTimeoutDetector creates the TimeoutDetector.
-func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
- return &TimeoutDetector{
- maxDuration: maxDuration,
- records: make(map[uint64]time.Time),
- }
-}
-
-// Reset resets the NewTimeoutDetector.
-func (td *TimeoutDetector) Reset() {
- td.mu.Lock()
- defer td.mu.Unlock()
-
- td.records = make(map[uint64]time.Time)
-}
-
-// Observe observes an event for given id. It returns false and exceeded duration
-// if the interval is longer than the expectation.
-func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
- td.mu.Lock()
- defer td.mu.Unlock()
-
- ok := true
- now := time.Now()
- exceed := time.Duration(0)
-
- if pt, found := td.records[which]; found {
- exceed = now.Sub(pt) - td.maxDuration
- if exceed > 0 {
- ok = false
- }
- }
- td.records[which] = now
- return ok, exceed
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/contention/doc.go b/vendor/github.com/coreos/etcd/pkg/contention/doc.go
deleted file mode 100644
index daf4522..0000000
--- a/vendor/github.com/coreos/etcd/pkg/contention/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package contention provides facilities for detecting system contention.
-package contention
diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go
deleted file mode 100644
index 0c64f16..0000000
--- a/vendor/github.com/coreos/etcd/pkg/cors/cors.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cors handles cross-origin HTTP requests (CORS).
-package cors
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "sort"
- "strings"
-)
-
-type CORSInfo map[string]bool
-
-// Set implements the flag.Value interface to allow users to define a list of CORS origins
-func (ci *CORSInfo) Set(s string) error {
- m := make(map[string]bool)
- for _, v := range strings.Split(s, ",") {
- v = strings.TrimSpace(v)
- if v == "" {
- continue
- }
- if v != "*" {
- if _, err := url.Parse(v); err != nil {
- return fmt.Errorf("Invalid CORS origin: %s", err)
- }
- }
- m[v] = true
-
- }
- *ci = CORSInfo(m)
- return nil
-}
-
-func (ci *CORSInfo) String() string {
- o := make([]string, 0)
- for k := range *ci {
- o = append(o, k)
- }
- sort.StringSlice(o).Sort()
- return strings.Join(o, ",")
-}
-
-// OriginAllowed determines whether the server will allow a given CORS origin.
-func (c CORSInfo) OriginAllowed(origin string) bool {
- return c["*"] || c[origin]
-}
-
-type CORSHandler struct {
- Handler http.Handler
- Info *CORSInfo
-}
-
-// addHeader adds the correct cors headers given an origin
-func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) {
- w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
- w.Header().Add("Access-Control-Allow-Origin", origin)
- w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
-}
-
-// ServeHTTP adds the correct CORS headers based on the origin and returns immediately
-// with a 200 OK if the method is OPTIONS.
-func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- // Write CORS header.
- if h.Info.OriginAllowed("*") {
- h.addHeader(w, "*")
- } else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) {
- h.addHeader(w, origin)
- }
-
- if req.Method == "OPTIONS" {
- w.WriteHeader(http.StatusOK)
- return
- }
-
- h.Handler.ServeHTTP(w, req)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go
deleted file mode 100644
index 0323b2d..0000000
--- a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cpuutil provides facilities for detecting cpu-specific features.
-package cpuutil
diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
deleted file mode 100644
index 6ab898d..0000000
--- a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cpuutil
-
-import (
- "encoding/binary"
- "unsafe"
-)
-
-const intWidth int = int(unsafe.Sizeof(0))
-
-var byteOrder binary.ByteOrder
-
-// ByteOrder returns the byte order for the CPU's native endianness.
-func ByteOrder() binary.ByteOrder { return byteOrder }
-
-func init() {
- var i int = 0x1
- if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
- byteOrder = binary.BigEndian
- } else {
- byteOrder = binary.LittleEndian
- }
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/github.com/coreos/etcd/pkg/crc/crc.go
deleted file mode 100644
index 4b998a4..0000000
--- a/vendor/github.com/coreos/etcd/pkg/crc/crc.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package crc provides utility function for cyclic redundancy check
-// algorithms.
-package crc
-
-import (
- "hash"
- "hash/crc32"
-)
-
-// The size of a CRC-32 checksum in bytes.
-const Size = 4
-
-type digest struct {
- crc uint32
- tab *crc32.Table
-}
-
-// New creates a new hash.Hash32 computing the CRC-32 checksum
-// using the polynomial represented by the Table.
-// Modified by xiangli to take a prevcrc.
-func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} }
-
-func (d *digest) Size() int { return Size }
-
-func (d *digest) BlockSize() int { return 1 }
-
-func (d *digest) Reset() { d.crc = 0 }
-
-func (d *digest) Write(p []byte) (n int, err error) {
- d.crc = crc32.Update(d.crc, d.tab, p)
- return len(p), nil
-}
-
-func (d *digest) Sum32() uint32 { return d.crc }
-
-func (d *digest) Sum(in []byte) []byte {
- s := d.Sum32()
- return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go
deleted file mode 100644
index 74499eb..0000000
--- a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package debugutil includes utility functions for debugging.
-package debugutil
diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go
deleted file mode 100644
index 8d5544a..0000000
--- a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package debugutil
-
-import (
- "net/http"
- "net/http/pprof"
- "runtime"
-)
-
-const HTTPPrefixPProf = "/debug/pprof"
-
-// PProfHandlers returns a map of pprof handlers keyed by the HTTP path.
-func PProfHandlers() map[string]http.Handler {
- // set only when there's no existing setting
- if runtime.SetMutexProfileFraction(-1) == 0 {
- // 1 out of 5 mutex events are reported, on average
- runtime.SetMutexProfileFraction(5)
- }
-
- m := make(map[string]http.Handler)
-
- m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index)
- m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile)
- m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol)
- m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline)
- m[HTTPPrefixPProf+"/trace "] = http.HandlerFunc(pprof.Trace)
- m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap")
- m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine")
- m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate")
- m[HTTPPrefixPProf+"/block"] = pprof.Handler("block")
- m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex")
-
- return m
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
deleted file mode 100644
index 58a77df..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package fileutil
-
-import "os"
-
-// OpenDir opens a directory for syncing.
-func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
deleted file mode 100644
index c123395..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-// OpenDir opens a directory in windows with write access for syncing.
-func OpenDir(path string) (*os.File, error) {
- fd, err := openDir(path)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func openDir(path string) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
- createmode := uint32(syscall.OPEN_EXISTING)
- fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
- return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
deleted file mode 100644
index fce5126..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package fileutil implements utility functions related to files and paths.
-package fileutil
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
- // PrivateFileMode grants owner to read/write a file.
- PrivateFileMode = 0600
- // PrivateDirMode grants owner to make/remove files inside the directory.
- PrivateDirMode = 0700
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
-)
-
-// IsDirWriteable checks if dir is writable by writing and removing a file
-// to dir. It returns nil if dir is writable.
-func IsDirWriteable(dir string) error {
- f := filepath.Join(dir, ".touch")
- if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
- return err
- }
- return os.Remove(f)
-}
-
-// ReadDir returns the filenames in the given directory in sorted order.
-func ReadDir(dirpath string) ([]string, error) {
- dir, err := os.Open(dirpath)
- if err != nil {
- return nil, err
- }
- defer dir.Close()
- names, err := dir.Readdirnames(-1)
- if err != nil {
- return nil, err
- }
- sort.Strings(names)
- return names, nil
-}
-
-// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
-// does not exists. TouchDirAll also ensures the given directory is writable.
-func TouchDirAll(dir string) error {
- // If path is already a directory, MkdirAll does nothing
- // and returns nil.
- err := os.MkdirAll(dir, PrivateDirMode)
- if err != nil {
- // if mkdirAll("a/text") and "text" is not
- // a directory, this will return syscall.ENOTDIR
- return err
- }
- return IsDirWriteable(dir)
-}
-
-// CreateDirAll is similar to TouchDirAll but returns error
-// if the deepest directory was not empty.
-func CreateDirAll(dir string) error {
- err := TouchDirAll(dir)
- if err == nil {
- var ns []string
- ns, err = ReadDir(dir)
- if err != nil {
- return err
- }
- if len(ns) != 0 {
- err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
- }
- }
- return err
-}
-
-func Exist(name string) bool {
- _, err := os.Stat(name)
- return err == nil
-}
-
-// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
-// shorten the length of the file.
-func ZeroToEnd(f *os.File) error {
- // TODO: support FALLOC_FL_ZERO_RANGE
- off, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- lenf, lerr := f.Seek(0, io.SeekEnd)
- if lerr != nil {
- return lerr
- }
- if err = f.Truncate(off); err != nil {
- return err
- }
- // make sure blocks remain allocated
- if err = Preallocate(f, lenf, true); err != nil {
- return err
- }
- _, err = f.Seek(off, io.SeekStart)
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
deleted file mode 100644
index 338627f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "errors"
- "os"
-)
-
-var (
- ErrLocked = errors.New("fileutil: file already locked")
-)
-
-type LockedFile struct{ *os.File }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go
deleted file mode 100644
index 542550b..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows,!plan9,!solaris
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
- f.Close()
- if err == syscall.EWOULDBLOCK {
- err = ErrLocked
- }
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go
deleted file mode 100644
index 939fea6..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package fileutil
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-// This used to call syscall.Flock() but that call fails with EBADF on NFS.
-// An alternative is lockf() which works on NFS but that call lets a process lock
-// the same file twice. Instead, use Linux's non-standard open file descriptor
-// locks which will block if the process already holds the file lock.
-//
-// constants from /usr/include/bits/fcntl-linux.h
-const (
- F_OFD_GETLK = 37
- F_OFD_SETLK = 37
- F_OFD_SETLKW = 38
-)
-
-var (
- wrlck = syscall.Flock_t{
- Type: syscall.F_WRLCK,
- Whence: int16(io.SeekStart),
- Start: 0,
- Len: 0,
- }
-
- linuxTryLockFile = flockTryLockFile
- linuxLockFile = flockLockFile
-)
-
-func init() {
- // use open file descriptor locks if the system supports it
- getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
- if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
- linuxTryLockFile = ofdTryLockFile
- linuxLockFile = ofdLockFile
- }
-}
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return linuxTryLockFile(path, flag, perm)
-}
-
-func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
-
- flock := wrlck
- if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
- f.Close()
- if err == syscall.EWOULDBLOCK {
- err = ErrLocked
- }
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return linuxLockFile(path, flag, perm)
-}
-
-func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
-
- flock := wrlck
- err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
-
- if err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
deleted file mode 100644
index fee6a7c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "syscall"
- "time"
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
- return nil, err
- }
- f, err := os.Open(path, flag, perm)
- if err != nil {
- return nil, ErrLocked
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
- return nil, err
- }
- for {
- f, err := os.OpenFile(path, flag, perm)
- if err == nil {
- return &LockedFile{f}, nil
- }
- time.Sleep(10 * time.Millisecond)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
deleted file mode 100644
index 352ca55..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build solaris
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Pid = 0
- lock.Type = syscall.F_WRLCK
- lock.Whence = 0
- lock.Pid = 0
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
- f.Close()
- if err == syscall.EAGAIN {
- err = ErrLocked
- }
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Pid = 0
- lock.Type = syscall.F_WRLCK
- lock.Whence = 0
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
deleted file mode 100644
index ed01164..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows,!plan9,!solaris,!linux
-
-package fileutil
-
-import (
- "os"
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return flockTryLockFile(path, flag, perm)
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return flockLockFile(path, flag, perm)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
deleted file mode 100644
index b181723..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package fileutil
-
-import (
- "errors"
- "fmt"
- "os"
- "syscall"
- "unsafe"
-)
-
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
-
- errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
-)
-
-const (
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
- LOCKFILE_EXCLUSIVE_LOCK = 2
- LOCKFILE_FAIL_IMMEDIATELY = 1
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
- errLockViolation syscall.Errno = 0x21
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := open(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := open(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func open(path string, flag int, perm os.FileMode) (*os.File, error) {
- if path == "" {
- return nil, fmt.Errorf("cannot open empty filename")
- }
- var access uint32
- switch flag {
- case syscall.O_RDONLY:
- access = syscall.GENERIC_READ
- case syscall.O_WRONLY:
- access = syscall.GENERIC_WRITE
- case syscall.O_RDWR:
- access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
- case syscall.O_WRONLY | syscall.O_CREAT:
- access = syscall.GENERIC_ALL
- default:
- panic(fmt.Errorf("flag %v is not supported", flag))
- }
- fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
- access,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
- nil,
- syscall.OPEN_ALWAYS,
- syscall.FILE_ATTRIBUTE_NORMAL,
- 0)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func lockFile(fd syscall.Handle, flags uint32) error {
- var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
- flag |= flags
- if fd == syscall.InvalidHandle {
- return nil
- }
- err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
- if err == nil {
- return nil
- } else if err.Error() == errLocked.Error() {
- return ErrLocked
- } else if err != errLockViolation {
- return err
- }
- return nil
-}
-
-func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- var reserved uint32 = 0
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r1 == 0 {
- if e1 != 0 {
- err = error(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go
deleted file mode 100644
index c747b7c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "io"
- "os"
-)
-
-// Preallocate tries to allocate the space for given
-// file. This operation is only supported on linux by a
-// few filesystems (btrfs, ext4, etc.).
-// If the operation is unsupported, no error will be returned.
-// Otherwise, the error encountered will be returned.
-func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
- if sizeInBytes == 0 {
- // fallocate will return EINVAL if length is 0; skip
- return nil
- }
- if extendFile {
- return preallocExtend(f, sizeInBytes)
- }
- return preallocFixed(f, sizeInBytes)
-}
-
-func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
- curOff, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- size, err := f.Seek(sizeInBytes, io.SeekEnd)
- if err != nil {
- return err
- }
- if _, err = f.Seek(curOff, io.SeekStart); err != nil {
- return err
- }
- if sizeInBytes > size {
- return nil
- }
- return f.Truncate(sizeInBytes)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
deleted file mode 100644
index 5a6dccf..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build darwin
-
-package fileutil
-
-import (
- "os"
- "syscall"
- "unsafe"
-)
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- if err := preallocFixed(f, sizeInBytes); err != nil {
- return err
- }
- return preallocExtendTrunc(f, sizeInBytes)
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error {
- // allocate all requested space or no space at all
- // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
- fstore := &syscall.Fstore_t{
- Flags: syscall.F_ALLOCATEALL,
- Posmode: syscall.F_PEOFPOSMODE,
- Length: sizeInBytes}
- p := unsafe.Pointer(fstore)
- _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
- if errno == 0 || errno == syscall.ENOTSUP {
- return nil
- }
-
- // wrong argument to fallocate syscall
- if errno == syscall.EINVAL {
- // filesystem "st_blocks" are allocated in the units of
- // "Allocation Block Size" (run "diskutil info /" command)
- var stat syscall.Stat_t
- syscall.Fstat(int(f.Fd()), &stat)
-
- // syscall.Statfs_t.Bsize is "optimal transfer block size"
- // and contains matching 4096 value when latest OS X kernel
- // supports 4,096 KB filesystem block size
- var statfs syscall.Statfs_t
- syscall.Fstatfs(int(f.Fd()), &statfs)
- blockSize := int64(statfs.Bsize)
-
- if stat.Blocks*blockSize >= sizeInBytes {
- // enough blocks are already allocated
- return nil
- }
- }
- return errno
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
deleted file mode 100644
index 50bd84f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- // use mode = 0 to change size
- err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
- if err != nil {
- errno, ok := err.(syscall.Errno)
- // not supported; fallback
- // fallocate EINTRs frequently in some environments; fallback
- if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
- return preallocExtendTrunc(f, sizeInBytes)
- }
- }
- return err
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error {
- // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
- err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
- if err != nil {
- errno, ok := err.(syscall.Errno)
- // treat not supported as nil error
- if ok && errno == syscall.ENOTSUP {
- return nil
- }
- }
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go
deleted file mode 100644
index 162fbc5..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!darwin
-
-package fileutil
-
-import "os"
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- return preallocExtendTrunc(f, sizeInBytes)
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
deleted file mode 100644
index 92fceab..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-)
-
-func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
- return purgeFile(dirname, suffix, max, interval, stop, nil)
-}
-
-// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
-func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string) <-chan error {
- errC := make(chan error, 1)
- go func() {
- for {
- fnames, err := ReadDir(dirname)
- if err != nil {
- errC <- err
- return
- }
- newfnames := make([]string, 0)
- for _, fname := range fnames {
- if strings.HasSuffix(fname, suffix) {
- newfnames = append(newfnames, fname)
- }
- }
- sort.Strings(newfnames)
- fnames = newfnames
- for len(newfnames) > int(max) {
- f := filepath.Join(dirname, newfnames[0])
- l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
- if err != nil {
- break
- }
- if err = os.Remove(f); err != nil {
- errC <- err
- return
- }
- if err = l.Close(); err != nil {
- plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
- errC <- err
- return
- }
- plog.Infof("purged file %s successfully", f)
- newfnames = newfnames[1:]
- }
- if purgec != nil {
- for i := 0; i < len(fnames)-len(newfnames); i++ {
- purgec <- fnames[i]
- }
- }
- select {
- case <-time.After(interval):
- case <-stop:
- return
- }
- }
- }()
- return errC
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go
deleted file mode 100644
index 54dd41f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!darwin
-
-package fileutil
-
-import "os"
-
-// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
-func Fsync(f *os.File) error {
- return f.Sync()
-}
-
-// Fdatasync is a wrapper around file.Sync(). Special handling is needed on linux platform.
-func Fdatasync(f *os.File) error {
- return f.Sync()
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
deleted file mode 100644
index c2f39bf..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build darwin
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-// Fsync on HFS/OSX flushes the data on to the physical drive but the drive
-// may not write it to the persistent media for quite sometime and it may be
-// written in out-of-order sequence. Using F_FULLFSYNC ensures that the
-// physical drive's buffer will also get flushed to the media.
-func Fsync(f *os.File) error {
- _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
- if errno == 0 {
- return nil
- }
- return errno
-}
-
-// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
-// on physical drive media.
-func Fdatasync(f *os.File) error {
- return Fsync(f)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
deleted file mode 100644
index 1bbced9..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-// Fsync is a wrapper around file.Sync(). Special handling is needed on darwin platform.
-func Fsync(f *os.File) error {
- return f.Sync()
-}
-
-// Fdatasync is similar to fsync(), but does not flush modified metadata
-// unless that metadata is needed in order to allow a subsequent data retrieval
-// to be correctly handled.
-func Fdatasync(f *os.File) error {
- return syscall.Fdatasync(int(f.Fd()))
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/flags/flag.go b/vendor/github.com/coreos/etcd/pkg/flags/flag.go
deleted file mode 100644
index 69c4641..0000000
--- a/vendor/github.com/coreos/etcd/pkg/flags/flag.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package flags implements command-line flag parsing.
-package flags
-
-import (
- "flag"
- "fmt"
- "net/url"
- "os"
- "strings"
-
- "github.com/coreos/pkg/capnslog"
- "github.com/spf13/pflag"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/flags")
-)
-
-// DeprecatedFlag encapsulates a flag that may have been previously valid but
-// is now deprecated. If a DeprecatedFlag is set, an error occurs.
-type DeprecatedFlag struct {
- Name string
-}
-
-func (f *DeprecatedFlag) Set(_ string) error {
- return fmt.Errorf(`flag "-%s" is no longer supported.`, f.Name)
-}
-
-func (f *DeprecatedFlag) String() string {
- return ""
-}
-
-// IgnoredFlag encapsulates a flag that may have been previously valid but is
-// now ignored. If an IgnoredFlag is set, a warning is printed and
-// operation continues.
-type IgnoredFlag struct {
- Name string
-}
-
-// IsBoolFlag is defined to allow the flag to be defined without an argument
-func (f *IgnoredFlag) IsBoolFlag() bool {
- return true
-}
-
-func (f *IgnoredFlag) Set(s string) error {
- plog.Warningf(`flag "-%s" is no longer supported - ignoring.`, f.Name)
- return nil
-}
-
-func (f *IgnoredFlag) String() string {
- return ""
-}
-
-// SetFlagsFromEnv parses all registered flags in the given flagset,
-// and if they are not already set it attempts to set their values from
-// environment variables. Environment variables take the name of the flag but
-// are UPPERCASE, have the given prefix and any dashes are replaced by
-// underscores - for example: some-flag => ETCD_SOME_FLAG
-func SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error {
- var err error
- alreadySet := make(map[string]bool)
- fs.Visit(func(f *flag.Flag) {
- alreadySet[FlagToEnv(prefix, f.Name)] = true
- })
- usedEnvKey := make(map[string]bool)
- fs.VisitAll(func(f *flag.Flag) {
- if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, true); serr != nil {
- err = serr
- }
- })
- verifyEnv(prefix, usedEnvKey, alreadySet)
- return err
-}
-
-// SetPflagsFromEnv is similar to SetFlagsFromEnv. However, the accepted flagset type is pflag.FlagSet
-// and it does not do any logging.
-func SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error {
- var err error
- alreadySet := make(map[string]bool)
- usedEnvKey := make(map[string]bool)
- fs.VisitAll(func(f *pflag.Flag) {
- if f.Changed {
- alreadySet[FlagToEnv(prefix, f.Name)] = true
- }
- if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, false); serr != nil {
- err = serr
- }
- })
- verifyEnv(prefix, usedEnvKey, alreadySet)
- return err
-}
-
-// FlagToEnv converts flag string to upper-case environment variable key string.
-func FlagToEnv(prefix, name string) string {
- return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
-}
-
-func verifyEnv(prefix string, usedEnvKey, alreadySet map[string]bool) {
- for _, env := range os.Environ() {
- kv := strings.SplitN(env, "=", 2)
- if len(kv) != 2 {
- plog.Warningf("found invalid env %s", env)
- }
- if usedEnvKey[kv[0]] {
- continue
- }
- if alreadySet[kv[0]] {
- // TODO: exit with error in v3.4
- plog.Warningf("recognized environment variable %s, but unused: shadowed by corresponding flag", kv[0])
- continue
- }
- if strings.HasPrefix(env, prefix+"_") {
- plog.Warningf("unrecognized environment variable %s", env)
- }
- }
-}
-
-type flagSetter interface {
- Set(fk string, fv string) error
-}
-
-func setFlagFromEnv(fs flagSetter, prefix, fname string, usedEnvKey, alreadySet map[string]bool, log bool) error {
- key := FlagToEnv(prefix, fname)
- if !alreadySet[key] {
- val := os.Getenv(key)
- if val != "" {
- usedEnvKey[key] = true
- if serr := fs.Set(fname, val); serr != nil {
- return fmt.Errorf("invalid value %q for %s: %v", val, key, serr)
- }
- if log {
- plog.Infof("recognized and used environment variable %s=%s", key, val)
- }
- }
- }
- return nil
-}
-
-// URLsFromFlag returns a slices from url got from the flag.
-func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
- return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue))
-}
-
-func IsSet(fs *flag.FlagSet, name string) bool {
- set := false
- fs.Visit(func(f *flag.Flag) {
- if f.Name == name {
- set = true
- }
- })
- return set
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/flags/strings.go b/vendor/github.com/coreos/etcd/pkg/flags/strings.go
deleted file mode 100644
index 89bdf95..0000000
--- a/vendor/github.com/coreos/etcd/pkg/flags/strings.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package flags
-
-import (
- "errors"
- "flag"
- "sort"
- "strings"
-)
-
-// NewStringsFlag creates a new string flag for which any one of the given
-// strings is a valid value, and any other value is an error.
-//
-// valids[0] will be default value. Caller must be sure len(valids)!=0 or
-// it will panic.
-func NewStringsFlag(valids ...string) *StringsFlag {
- return &StringsFlag{Values: valids, val: valids[0]}
-}
-
-// StringsFlag implements the flag.Value interface.
-type StringsFlag struct {
- Values []string
- val string
-}
-
-// Set verifies the argument to be a valid member of the allowed values
-// before setting the underlying flag value.
-func (ss *StringsFlag) Set(s string) error {
- for _, v := range ss.Values {
- if s == v {
- ss.val = s
- return nil
- }
- }
- return errors.New("invalid value")
-}
-
-// String returns the set value (if any) of the StringsFlag
-func (ss *StringsFlag) String() string {
- return ss.val
-}
-
-// StringsValueV2 wraps "sort.StringSlice".
-type StringsValueV2 sort.StringSlice
-
-// Set parses a command line set of strings, separated by comma.
-// Implements "flag.Value" interface.
-func (ss *StringsValueV2) Set(s string) error {
- *ss = strings.Split(s, ",")
- return nil
-}
-
-// String implements "flag.Value" interface.
-func (ss *StringsValueV2) String() string { return strings.Join(*ss, ",") }
-
-// NewStringsValueV2 implements string slice as "flag.Value" interface.
-// Given value is to be separated by comma.
-func NewStringsValueV2(s string) (ss *StringsValueV2) {
- if s == "" {
- return &StringsValueV2{}
- }
- ss = new(StringsValueV2)
- if err := ss.Set(s); err != nil {
- plog.Panicf("new StringsValueV2 should never fail: %v", err)
- }
- return ss
-}
-
-// StringsFromFlagV2 returns a string slice from the flag.
-func StringsFromFlagV2(fs *flag.FlagSet, flagName string) []string {
- return []string(*fs.Lookup(flagName).Value.(*StringsValueV2))
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/flags/urls.go b/vendor/github.com/coreos/etcd/pkg/flags/urls.go
deleted file mode 100644
index 6383d7e..0000000
--- a/vendor/github.com/coreos/etcd/pkg/flags/urls.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package flags
-
-import (
- "strings"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-type URLsValue types.URLs
-
-// Set parses a command line set of URLs formatted like:
-// http://127.0.0.1:2380,http://10.1.1.2:80
-func (us *URLsValue) Set(s string) error {
- strs := strings.Split(s, ",")
- nus, err := types.NewURLs(strs)
- if err != nil {
- return err
- }
-
- *us = URLsValue(nus)
- return nil
-}
-
-func (us *URLsValue) String() string {
- all := make([]string, len(*us))
- for i, u := range *us {
- all[i] = u.String()
- }
- return strings.Join(all, ",")
-}
-
-func NewURLsValue(init string) *URLsValue {
- v := &URLsValue{}
- if err := v.Set(init); err != nil {
- plog.Panicf("new URLsValue should never fail: %v", err)
- }
- return v
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
deleted file mode 100644
index 09f44e7..0000000
--- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// borrowed from golang/net/context/ctxhttp/cancelreq.go
-
-// Package httputil provides HTTP utility functions.
-package httputil
-
-import (
- "io"
- "io/ioutil"
- "net/http"
-)
-
-// GracefulClose drains http.Response.Body until it hits EOF
-// and closes it. This prevents TCP/TLS connections from closing,
-// therefore available for reuse.
-func GracefulClose(resp *http.Response) {
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go
deleted file mode 100644
index 2da2106..0000000
--- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package idutil implements utility functions for generating unique,
-// randomized ids.
-package idutil
-
-import (
- "math"
- "sync"
- "time"
-)
-
-const (
- tsLen = 5 * 8
- cntLen = 8
- suffixLen = tsLen + cntLen
-)
-
-// Generator generates unique identifiers based on counters, timestamps, and
-// a node member ID.
-//
-// The initial id is in this format:
-// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
-// and low order one byte is a counter.
-// | prefix | suffix |
-// | 2 bytes | 5 bytes | 1 byte |
-// | memberID | timestamp | cnt |
-//
-// The timestamp 5 bytes is different when the machine is restart
-// after 1 ms and before 35 years.
-//
-// It increases suffix to generate the next id.
-// The count field may overflow to timestamp field, which is intentional.
-// It helps to extend the event window to 2^56. This doesn't break that
-// id generated after restart is unique because etcd throughput is <<
-// 256req/ms(250k reqs/second).
-type Generator struct {
- mu sync.Mutex
- // high order 2 bytes
- prefix uint64
- // low order 6 bytes
- suffix uint64
-}
-
-func NewGenerator(memberID uint16, now time.Time) *Generator {
- prefix := uint64(memberID) << suffixLen
- unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
- suffix := lowbit(unixMilli, tsLen) << cntLen
- return &Generator{
- prefix: prefix,
- suffix: suffix,
- }
-}
-
-// Next generates a id that is unique.
-func (g *Generator) Next() uint64 {
- g.mu.Lock()
- defer g.mu.Unlock()
- g.suffix++
- id := g.prefix | lowbit(g.suffix, suffixLen)
- return id
-}
-
-func lowbit(x uint64, n uint) uint64 {
- return x & (math.MaxUint64 >> (64 - n))
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
deleted file mode 100644
index 72de159..0000000
--- a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ioutil
-
-import (
- "io"
-)
-
-var defaultBufferBytes = 128 * 1024
-
-// PageWriter implements the io.Writer interface so that writes will
-// either be in page chunks or from flushing.
-type PageWriter struct {
- w io.Writer
- // pageOffset tracks the page offset of the base of the buffer
- pageOffset int
- // pageBytes is the number of bytes per page
- pageBytes int
- // bufferedBytes counts the number of bytes pending for write in the buffer
- bufferedBytes int
- // buf holds the write buffer
- buf []byte
- // bufWatermarkBytes is the number of bytes the buffer can hold before it needs
- // to be flushed. It is less than len(buf) so there is space for slack writes
- // to bring the writer to page alignment.
- bufWatermarkBytes int
-}
-
-// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
-// to write per page. pageOffset is the starting offset of io.Writer.
-func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
- return &PageWriter{
- w: w,
- pageOffset: pageOffset,
- pageBytes: pageBytes,
- buf: make([]byte, defaultBufferBytes+pageBytes),
- bufWatermarkBytes: defaultBufferBytes,
- }
-}
-
-func (pw *PageWriter) Write(p []byte) (n int, err error) {
- if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes {
- // no overflow
- copy(pw.buf[pw.bufferedBytes:], p)
- pw.bufferedBytes += len(p)
- return len(p), nil
- }
- // complete the slack page in the buffer if unaligned
- slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes)
- if slack != pw.pageBytes {
- partial := slack > len(p)
- if partial {
- // not enough data to complete the slack page
- slack = len(p)
- }
- // special case: writing to slack page in buffer
- copy(pw.buf[pw.bufferedBytes:], p[:slack])
- pw.bufferedBytes += slack
- n = slack
- p = p[slack:]
- if partial {
- // avoid forcing an unaligned flush
- return n, nil
- }
- }
- // buffer contents are now page-aligned; clear out
- if err = pw.Flush(); err != nil {
- return n, err
- }
- // directly write all complete pages without copying
- if len(p) > pw.pageBytes {
- pages := len(p) / pw.pageBytes
- c, werr := pw.w.Write(p[:pages*pw.pageBytes])
- n += c
- if werr != nil {
- return n, werr
- }
- p = p[pages*pw.pageBytes:]
- }
- // write remaining tail to buffer
- c, werr := pw.Write(p)
- n += c
- return n, werr
-}
-
-func (pw *PageWriter) Flush() error {
- if pw.bufferedBytes == 0 {
- return nil
- }
- _, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
- pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
- pw.bufferedBytes = 0
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go
deleted file mode 100644
index d3efcfe..0000000
--- a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ioutil
-
-import (
- "fmt"
- "io"
-)
-
-// ReaderAndCloser implements io.ReadCloser interface by combining
-// reader and closer together.
-type ReaderAndCloser struct {
- io.Reader
- io.Closer
-}
-
-var (
- ErrShortRead = fmt.Errorf("ioutil: short read")
- ErrExpectEOF = fmt.Errorf("ioutil: expect EOF")
-)
-
-// NewExactReadCloser returns a ReadCloser that returns errors if the underlying
-// reader does not read back exactly the requested number of bytes.
-func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser {
- return &exactReadCloser{rc: rc, totalBytes: totalBytes}
-}
-
-type exactReadCloser struct {
- rc io.ReadCloser
- br int64
- totalBytes int64
-}
-
-func (e *exactReadCloser) Read(p []byte) (int, error) {
- n, err := e.rc.Read(p)
- e.br += int64(n)
- if e.br > e.totalBytes {
- return 0, ErrExpectEOF
- }
- if e.br < e.totalBytes && n == 0 {
- return 0, ErrShortRead
- }
- return n, err
-}
-
-func (e *exactReadCloser) Close() error {
- if err := e.rc.Close(); err != nil {
- return err
- }
- if e.br < e.totalBytes {
- return ErrShortRead
- }
- return nil
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go
deleted file mode 100644
index 0703ed4..0000000
--- a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ioutil implements I/O utility functions.
-package ioutil
-
-import "io"
-
-// NewLimitedBufferReader returns a reader that reads from the given reader
-// but limits the amount of data returned to at most n bytes.
-func NewLimitedBufferReader(r io.Reader, n int) io.Reader {
- return &limitedBufferReader{
- r: r,
- n: n,
- }
-}
-
-type limitedBufferReader struct {
- r io.Reader
- n int
-}
-
-func (r *limitedBufferReader) Read(p []byte) (n int, err error) {
- np := p
- if len(np) > r.n {
- np = np[:r.n]
- }
- return r.r.Read(np)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go
deleted file mode 100644
index 192ad88..0000000
--- a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ioutil
-
-import (
- "io"
- "os"
-
- "github.com/coreos/etcd/pkg/fileutil"
-)
-
-// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
-// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
-// is synced if there is no error returned.
-func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
- f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
- if err != nil {
- return err
- }
- n, err := f.Write(data)
- if err == nil && n < len(data) {
- err = io.ErrShortWrite
- }
- if err == nil {
- err = fileutil.Fsync(f)
- }
- if err1 := f.Close(); err == nil {
- err = err1
- }
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
deleted file mode 100644
index cc750f4..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package logutil includes utilities to facilitate logging.
-package logutil
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- defaultMergePeriod = time.Second
- defaultTimeOutputScale = 10 * time.Millisecond
-
- outputInterval = time.Second
-)
-
-// line represents a log line that can be printed out
-// through capnslog.PackageLogger.
-type line struct {
- level capnslog.LogLevel
- str string
-}
-
-func (l line) append(s string) line {
- return line{
- level: l.level,
- str: l.str + " " + s,
- }
-}