VOL-1497: Add more control to KV/memory access
- Added a KV locking mechanism (etcd only); a usage sketch follows the Change-Id below
- (watch) control path access whenever possible
- (watch) use a transaction for updates and merge with memory
- Cleaned up vendoring
- Miscellaneous changes to fix exceptions found along the way
Amendments:
- Copyright header was removed in an auto-generated file
- Changed the default locking to false for the KV list operation
- Updated the backend API to allow passing a locking parameter
Change-Id: Ie1a55d3ca8b9d92ae71a85ce42bb22fcf1419e2c
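
Illustrative sketch only, not the actual VOLTHA backend API introduced by this change: it shows how a "lock" flag on a KV list/prefix read could be honored against etcd using the vendored go.etcd.io/etcd/clientv3 client and its concurrency package (assumed available alongside the vendored client). The function name listPrefix, the lock-key prefix "/locks", the endpoint, and the "/service/voltha/" prefix are all hypothetical; the lock defaults to false, matching the amended default for the KV list operation.

// Hypothetical sketch; listPrefix and its parameters are illustrative, not the real backend API.
package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

// listPrefix returns all key/value pairs under prefix. When lock is true,
// the read is serialized against other lock-aware clients via an etcd mutex.
func listPrefix(ctx context.Context, cli *clientv3.Client, prefix string, lock bool) (map[string][]byte, error) {
	if lock {
		// A session keeps a lease alive for the lifetime of the mutex.
		session, err := concurrency.NewSession(cli, concurrency.WithTTL(5))
		if err != nil {
			return nil, err
		}
		defer session.Close()

		mu := concurrency.NewMutex(session, "/locks"+prefix) // lock-key prefix is an assumption
		if err := mu.Lock(ctx); err != nil {
			return nil, err
		}
		defer mu.Unlock(ctx)
	}

	resp, err := cli.Get(ctx, prefix, clientv3.WithPrefix())
	if err != nil {
		return nil, err
	}
	out := make(map[string][]byte, len(resp.Kvs))
	for _, kv := range resp.Kvs {
		out[string(kv.Key)] = kv.Value
	}
	return out, nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // endpoint is an assumption
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Default is lock=false, matching the amended default for the KV list operation.
	kvs, err := listPrefix(ctx, cli, "/service/voltha/", false)
	if err != nil {
		panic(err)
	}
	fmt.Println("keys:", len(kvs))
}
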
diff --git a/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go
new file mode 100644
index 0000000..1a940c3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.pb.go
@@ -0,0 +1,807 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+/*
+ Package authpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ auth.proto
+
+ It has these top-level messages:
+ User
+ Permission
+ Role
+*/
+package authpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Permission_Type int32
+
+const (
+ READ Permission_Type = 0
+ WRITE Permission_Type = 1
+ READWRITE Permission_Type = 2
+)
+
+var Permission_Type_name = map[int32]string{
+ 0: "READ",
+ 1: "WRITE",
+ 2: "READWRITE",
+}
+var Permission_Type_value = map[string]int32{
+ "READ": 0,
+ "WRITE": 1,
+ "READWRITE": 2,
+}
+
+func (x Permission_Type) String() string {
+ return proto.EnumName(Permission_Type_name, int32(x))
+}
+func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
+
+// User is a single entry in the bucket authUsers
+type User struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
+
+// Permission is a single entity
+type Permission struct {
+ PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *Permission) Reset() { *m = Permission{} }
+func (m *Permission) String() string { return proto.CompactTextString(m) }
+func (*Permission) ProtoMessage() {}
+func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
+
+// Role is a single entry in the bucket authRoles
+type Role struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
+}
+
+func (m *Role) Reset() { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage() {}
+func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
+
+func init() {
+ proto.RegisterType((*User)(nil), "authpb.User")
+ proto.RegisterType((*Permission)(nil), "authpb.Permission")
+ proto.RegisterType((*Role)(nil), "authpb.Role")
+ proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
+}
+func (m *User) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *Permission) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.PermType != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ return i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.KeyPermission) > 0 {
+ for _, msg := range m.KeyPermission {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintAuth(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *User) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Permission) Size() (n int) {
+ var l int
+ _ = l
+ if m.PermType != 0 {
+ n += 1 + sovAuth(uint64(m.PermType))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ return n
+}
+
+func (m *Role) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if len(m.KeyPermission) > 0 {
+ for _, e := range m.KeyPermission {
+ l = e.Size()
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovAuth(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozAuth(x uint64) (n int) {
+ return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: User: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
+ if m.Password == nil {
+ m.Password = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Permission) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Permission: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
+ }
+ m.PermType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PermType |= (Permission_Type(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Role: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KeyPermission = append(m.KeyPermission, &Permission{})
+ if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipAuth(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthAuth
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipAuth(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) }
+
+var fileDescriptorAuth = []byte{
+ // 288 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
+ 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
+ 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
+ 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
+ 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
+ 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
+ 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
+ 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
+ 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
+ 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
+ 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
+ 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
+ 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
+ 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
+ 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
+ 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
+ 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
+ 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/auth/authpb/auth.proto b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto
new file mode 100644
index 0000000..001d334
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/authpb/auth.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+package authpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+// User is a single entry in the bucket authUsers
+message User {
+ bytes name = 1;
+ bytes password = 2;
+ repeated string roles = 3;
+}
+
+// Permission is a single entity
+message Permission {
+ enum Type {
+ READ = 0;
+ WRITE = 1;
+ READWRITE = 2;
+ }
+ Type permType = 1;
+
+ bytes key = 2;
+ bytes range_end = 3;
+}
+
+// Role is a single entry in the bucket authRoles
+message Role {
+ bytes name = 1;
+
+ repeated Permission keyPermission = 2;
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/README.md b/vendor/go.etcd.io/etcd/clientv3/README.md
index 376bfba..a249b73 100644
--- a/vendor/go.etcd.io/etcd/clientv3/README.md
+++ b/vendor/go.etcd.io/etcd/clientv3/README.md
@@ -1,13 +1,16 @@
# etcd/clientv3
-[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
+[![Docs](https://readthedocs.org/projects/etcd/badge/?version=latest&style=flat-square)](https://etcd.readthedocs.io/en/latest/?badge=latest)
+[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3.
+See https://etcd.readthedocs.io/en/latest for latest client architecture.
+
## Install
```bash
-go get github.com/coreos/etcd/clientv3
+go get go.etcd.io/etcd/clientv3
```
## Get started
@@ -26,7 +29,7 @@
```
etcd v3 uses [`gRPC`](http://www.grpc.io) for remote procedure calls. And `clientv3` uses
-[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it.
+[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it.
If the client is not closed, the connection will have leaky goroutines. To specify client request timeout,
pass `context.WithTimeout` to APIs:
@@ -40,19 +43,14 @@
// use the response
```
-etcd uses `cmd/vendor` directory to store external dependencies, which are
-to be compiled into etcd release binaries. `client` can be imported without
-vendoring. For full compatibility, it is recommended to vendor builds using
-etcd's vendored packages, using tools like godep, as in
-[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
-For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
+For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
## Error Handling
etcd client returns 2 types of errors:
1. context error: canceled or deadline exceeded.
-2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes).
+2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes).
Here is the example code to handle client errors:
@@ -74,12 +72,16 @@
## Metrics
-The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go).
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go).
## Namespacing
-The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
+The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
+
+## Request size limit
+
+Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`.
## Examples
-More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3).
+More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3).
diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go
index 7545bb6..921f50f 100644
--- a/vendor/go.etcd.io/etcd/clientv3/auth.go
+++ b/vendor/go.etcd.io/etcd/clientv3/auth.go
@@ -19,9 +19,8 @@
"fmt"
"strings"
- "github.com/coreos/etcd/auth/authpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
+ "go.etcd.io/etcd/auth/authpb"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
@@ -100,70 +99,70 @@
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}
-type auth struct {
+type authClient struct {
remote pb.AuthClient
callOpts []grpc.CallOption
}
func NewAuth(c *Client) Auth {
- api := &auth{remote: RetryAuthClient(c)}
+ api := &authClient{remote: RetryAuthClient(c)}
if c != nil {
api.callOpts = c.callOpts
}
return api
}
-func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
+func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
+func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
+func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
+func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
@@ -173,22 +172,22 @@
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
+func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
+func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
- resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
+func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
+ resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
-func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
+func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
@@ -216,8 +215,8 @@
auth.conn.Close()
}
-func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
- conn, err := grpc.Dial(endpoint, opts...)
+func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
+ conn, err := grpc.DialContext(ctx, target, opts...)
if err != nil {
return nil, err
}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
new file mode 100644
index 0000000..25dc2b7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/balancer.go
@@ -0,0 +1,275 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import (
+ "fmt"
+ "strconv"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/clientv3/balancer/picker"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/resolver"
+ _ "google.golang.org/grpc/resolver/dns" // register DNS resolver
+ _ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
+)
+
+// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
+// must be invoked at initialization time.
+func RegisterBuilder(cfg Config) {
+ bb := &builder{cfg}
+ balancer.Register(bb)
+
+ bb.cfg.Logger.Info(
+ "registered balancer",
+ zap.String("policy", bb.cfg.Policy.String()),
+ zap.String("name", bb.cfg.Name),
+ )
+}
+
+type builder struct {
+ cfg Config
+}
+
+// Build is called initially when creating "ccBalancerWrapper".
+// "grpc.Dial" is called to this client connection.
+// Then, resolved addresses will be handled via "HandleResolvedAddrs".
+func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+ bb := &baseBalancer{
+ id: strconv.FormatInt(time.Now().UnixNano(), 36),
+ policy: b.cfg.Policy,
+ name: b.cfg.Policy.String(),
+ lg: b.cfg.Logger,
+
+ addrToSc: make(map[resolver.Address]balancer.SubConn),
+ scToAddr: make(map[balancer.SubConn]resolver.Address),
+ scToSt: make(map[balancer.SubConn]connectivity.State),
+
+ currentConn: nil,
+ csEvltr: &connectivityStateEvaluator{},
+
+ // initialize picker always returns "ErrNoSubConnAvailable"
+ Picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
+ }
+ if b.cfg.Name != "" {
+ bb.name = b.cfg.Name
+ }
+ if bb.lg == nil {
+ bb.lg = zap.NewNop()
+ }
+
+ // TODO: support multiple connections
+ bb.mu.Lock()
+ bb.currentConn = cc
+ bb.mu.Unlock()
+
+ bb.lg.Info(
+ "built balancer",
+ zap.String("balancer-id", bb.id),
+ zap.String("policy", bb.policy.String()),
+ zap.String("resolver-target", cc.Target()),
+ )
+ return bb
+}
+
+// Name implements "grpc/balancer.Builder" interface.
+func (b *builder) Name() string { return b.cfg.Name }
+
+// Balancer defines client balancer interface.
+type Balancer interface {
+ // Balancer is called on specified client connection. Client initiates gRPC
+ // connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved
+ // addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
+ // For each resolved address, balancer calls "balancer.ClientConn.NewSubConn".
+ // "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state
+ // changes, thus requires failover logic in this method.
+ balancer.Balancer
+
+ // Picker calls "Pick" for every client request.
+ picker.Picker
+}
+
+type baseBalancer struct {
+ id string
+ policy picker.Policy
+ name string
+ lg *zap.Logger
+
+ mu sync.RWMutex
+
+ addrToSc map[resolver.Address]balancer.SubConn
+ scToAddr map[balancer.SubConn]resolver.Address
+ scToSt map[balancer.SubConn]connectivity.State
+
+ currentConn balancer.ClientConn
+ currentState connectivity.State
+ csEvltr *connectivityStateEvaluator
+
+ picker.Picker
+}
+
+// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
+// gRPC sends initial or updated resolved addresses from "Build".
+func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+ if err != nil {
+ bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
+ return
+ }
+ bb.lg.Info("resolved", zap.String("balancer-id", bb.id), zap.Strings("addresses", addrsToStrings(addrs)))
+
+ bb.mu.Lock()
+ defer bb.mu.Unlock()
+
+ resolved := make(map[resolver.Address]struct{})
+ for _, addr := range addrs {
+ resolved[addr] = struct{}{}
+ if _, ok := bb.addrToSc[addr]; !ok {
+ sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+ if err != nil {
+ bb.lg.Warn("NewSubConn failed", zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
+ continue
+ }
+ bb.addrToSc[addr] = sc
+ bb.scToAddr[sc] = addr
+ bb.scToSt[sc] = connectivity.Idle
+ sc.Connect()
+ }
+ }
+
+ for addr, sc := range bb.addrToSc {
+ if _, ok := resolved[addr]; !ok {
+ // was removed by resolver or failed to create subconn
+ bb.currentConn.RemoveSubConn(sc)
+ delete(bb.addrToSc, addr)
+
+ bb.lg.Info(
+ "removed subconn",
+ zap.String("balancer-id", bb.id),
+ zap.String("address", addr.Addr),
+ zap.String("subconn", scToString(sc)),
+ )
+
+ // Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
+ // The entry will be deleted in HandleSubConnStateChange.
+ // (DO NOT) delete(bb.scToAddr, sc)
+ // (DO NOT) delete(bb.scToSt, sc)
+ }
+ }
+}
+
+// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
+func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ bb.mu.Lock()
+ defer bb.mu.Unlock()
+
+ old, ok := bb.scToSt[sc]
+ if !ok {
+ bb.lg.Warn(
+ "state change for an unknown subconn",
+ zap.String("balancer-id", bb.id),
+ zap.String("subconn", scToString(sc)),
+ zap.String("state", s.String()),
+ )
+ return
+ }
+
+ bb.lg.Info(
+ "state changed",
+ zap.String("balancer-id", bb.id),
+ zap.Bool("connected", s == connectivity.Ready),
+ zap.String("subconn", scToString(sc)),
+ zap.String("address", bb.scToAddr[sc].Addr),
+ zap.String("old-state", old.String()),
+ zap.String("new-state", s.String()),
+ )
+
+ bb.scToSt[sc] = s
+ switch s {
+ case connectivity.Idle:
+ sc.Connect()
+ case connectivity.Shutdown:
+ // When an address was removed by resolver, b called RemoveSubConn but
+ // kept the sc's state in scToSt. Remove state for this sc here.
+ delete(bb.scToAddr, sc)
+ delete(bb.scToSt, sc)
+ }
+
+ oldAggrState := bb.currentState
+ bb.currentState = bb.csEvltr.recordTransition(old, s)
+
+ // Regenerate picker when one of the following happens:
+ // - this sc became ready from not-ready
+ // - this sc became not-ready from ready
+ // - the aggregated state of balancer became TransientFailure from non-TransientFailure
+ // - the aggregated state of balancer became non-TransientFailure from TransientFailure
+ if (s == connectivity.Ready) != (old == connectivity.Ready) ||
+ (bb.currentState == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+ bb.regeneratePicker()
+ }
+
+ bb.currentConn.UpdateBalancerState(bb.currentState, bb.Picker)
+ return
+}
+
+func (bb *baseBalancer) regeneratePicker() {
+ if bb.currentState == connectivity.TransientFailure {
+ bb.lg.Info(
+ "generated transient error picker",
+ zap.String("balancer-id", bb.id),
+ zap.String("policy", bb.policy.String()),
+ )
+ bb.Picker = picker.NewErr(balancer.ErrTransientFailure)
+ return
+ }
+
+ // only pass ready subconns to picker
+ scs := make([]balancer.SubConn, 0)
+ addrToSc := make(map[resolver.Address]balancer.SubConn)
+ scToAddr := make(map[balancer.SubConn]resolver.Address)
+ for addr, sc := range bb.addrToSc {
+ if st, ok := bb.scToSt[sc]; ok && st == connectivity.Ready {
+ scs = append(scs, sc)
+ addrToSc[addr] = sc
+ scToAddr[sc] = addr
+ }
+ }
+
+ switch bb.policy {
+ case picker.RoundrobinBalanced:
+ bb.Picker = picker.NewRoundrobinBalanced(bb.lg, scs, addrToSc, scToAddr)
+
+ default:
+ panic(fmt.Errorf("invalid balancer picker policy (%d)", bb.policy))
+ }
+
+ bb.lg.Info(
+ "generated picker",
+ zap.String("balancer-id", bb.id),
+ zap.String("policy", bb.policy.String()),
+ zap.Strings("subconn-ready", scsToStrings(addrToSc)),
+ zap.Int("subconn-size", len(addrToSc)),
+ )
+}
+
+// Close implements "grpc/balancer.Balancer" interface.
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (bb *baseBalancer) Close() {
+ // TODO
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/config.go b/vendor/go.etcd.io/etcd/clientv3/balancer/config.go
new file mode 100644
index 0000000..0339a84
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/config.go
@@ -0,0 +1,36 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import (
+ "go.etcd.io/etcd/clientv3/balancer/picker"
+
+ "go.uber.org/zap"
+)
+
+// Config defines balancer configurations.
+type Config struct {
+ // Policy configures balancer policy.
+ Policy picker.Policy
+
+ // Name defines an additional name for balancer.
+ // Useful for balancer testing to avoid register conflicts.
+ // If empty, defaults to policy name.
+ Name string
+
+ // Logger configures balancer logging.
+ // If nil, logs are discarded.
+ Logger *zap.Logger
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity.go b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity.go
new file mode 100644
index 0000000..6cdeb3f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/connectivity.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// connectivityStateEvaluator gets updated by addrConns when their
+// states transition, based on which it evaluates the state of
+// ClientConn.
+type connectivityStateEvaluator struct {
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// recordTransition records state change happening in every subConn and based on
+// that it evaluates what aggregated state should be.
+// It can only transition between Ready, Connecting and TransientFailure. Other states,
+// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
+// before any subConn is created ClientConn is in idle state. In the end when ClientConn
+// closes it is in Shutdown state.
+//
+// recordTransition should only be called synchronously from the same goroutine.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+ // Update counters.
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ cse.numReady += updateVal
+ case connectivity.Connecting:
+ cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ }
+ }
+
+ // Evaluate.
+ if cse.numReady > 0 {
+ return connectivity.Ready
+ }
+ if cse.numConnecting > 0 {
+ return connectivity.Connecting
+ }
+ return connectivity.TransientFailure
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/doc.go
new file mode 100644
index 0000000..45af5e9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package balancer implements client balancer.
+package balancer
diff --git a/vendor/go.etcd.io/etcd/clientv3/health_balancer.go b/vendor/go.etcd.io/etcd/clientv3/balancer/grpc1.7-health.go
similarity index 72%
rename from vendor/go.etcd.io/etcd/clientv3/health_balancer.go
rename to vendor/go.etcd.io/etcd/clientv3/balancer/grpc1.7-health.go
index 5918cba..7d24b93 100644
--- a/vendor/go.etcd.io/etcd/clientv3/health_balancer.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/grpc1.7-health.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The etcd Authors
+// Copyright 2018 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package clientv3
+package balancer
import (
"context"
"errors"
+ "io/ioutil"
"net/url"
"strings"
"sync"
@@ -24,10 +25,14 @@
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
+// TODO: replace with something better
+var lg = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)
+
const (
minHealthRetryDuration = 3 * time.Second
unknownService = "unknown service grpc.health.v1.Health"
@@ -38,18 +43,16 @@
// This error is returned only when opts.BlockingWait is true.
var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available")
-type healthCheckFunc func(ep string) (bool, error)
-
-type notifyMsg int
+type NotifyMsg int
const (
- notifyReset notifyMsg = iota
- notifyNext
+ NotifyReset NotifyMsg = iota
+ NotifyNext
)
-// healthBalancer does the bare minimum to expose multiple eps
+// GRPC17Health does the bare minimum to expose multiple eps
// to the grpc reconnection code path
-type healthBalancer struct {
+type GRPC17Health struct {
// addrs are the client's endpoint addresses for grpc
addrs []grpc.Address
@@ -64,7 +67,7 @@
readyOnce sync.Once
// healthCheck checks an endpoint's health.
- healthCheck healthCheckFunc
+ healthCheck func(ep string) (bool, error)
healthCheckTimeout time.Duration
unhealthyMu sync.RWMutex
@@ -88,7 +91,7 @@
donec chan struct{}
// updateAddrsC notifies updateNotifyLoop to update addrs.
- updateAddrsC chan notifyMsg
+ updateAddrsC chan NotifyMsg
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
@@ -102,21 +105,29 @@
closed bool
}
-func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer {
+// DialFunc defines gRPC dial function.
+type DialFunc func(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error)
+
+// NewGRPC17Health returns a new health balancer with gRPC v1.7.
+func NewGRPC17Health(
+ eps []string,
+ timeout time.Duration,
+ dialFunc DialFunc,
+) *GRPC17Health {
notifyCh := make(chan []grpc.Address)
addrs := eps2addrs(eps)
- hb := &healthBalancer{
+ hb := &GRPC17Health{
addrs: addrs,
eps: eps,
notifyCh: notifyCh,
readyc: make(chan struct{}),
- healthCheck: hc,
+ healthCheck: func(ep string) (bool, error) { return grpcHealthCheck(ep, dialFunc) },
unhealthyHostPorts: make(map[string]time.Time),
upc: make(chan struct{}),
stopc: make(chan struct{}),
downc: make(chan struct{}),
donec: make(chan struct{}),
- updateAddrsC: make(chan notifyMsg),
+ updateAddrsC: make(chan NotifyMsg),
hostPort2ep: getHostPort2ep(eps),
}
if timeout < minHealthRetryDuration {
@@ -134,78 +145,81 @@
return hb
}
-func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil }
+func (b *GRPC17Health) Start(target string, config grpc.BalancerConfig) error { return nil }
-func (b *healthBalancer) ConnectNotify() <-chan struct{} {
+func (b *GRPC17Health) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
-func (b *healthBalancer) ready() <-chan struct{} { return b.readyc }
+func (b *GRPC17Health) UpdateAddrsC() chan NotifyMsg { return b.updateAddrsC }
+func (b *GRPC17Health) StopC() chan struct{} { return b.stopc }
-func (b *healthBalancer) endpoint(hostPort string) string {
+func (b *GRPC17Health) Ready() <-chan struct{} { return b.readyc }
+
+func (b *GRPC17Health) Endpoint(hostPort string) string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.hostPort2ep[hostPort]
}
-func (b *healthBalancer) pinned() string {
+func (b *GRPC17Health) Pinned() string {
b.mu.RLock()
defer b.mu.RUnlock()
return b.pinAddr
}
-func (b *healthBalancer) hostPortError(hostPort string, err error) {
- if b.endpoint(hostPort) == "" {
- logger.Lvl(4).Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
+func (b *GRPC17Health) HostPortError(hostPort string, err error) {
+ if b.Endpoint(hostPort) == "" {
+ lg.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error())
return
}
b.unhealthyMu.Lock()
b.unhealthyHostPorts[hostPort] = time.Now()
b.unhealthyMu.Unlock()
- logger.Lvl(4).Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
+ lg.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, err.Error())
}
-func (b *healthBalancer) removeUnhealthy(hostPort, msg string) {
- if b.endpoint(hostPort) == "" {
- logger.Lvl(4).Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
+func (b *GRPC17Health) removeUnhealthy(hostPort, msg string) {
+ if b.Endpoint(hostPort) == "" {
+ lg.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg)
return
}
b.unhealthyMu.Lock()
delete(b.unhealthyHostPorts, hostPort)
b.unhealthyMu.Unlock()
- logger.Lvl(4).Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
+ lg.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg)
}
-func (b *healthBalancer) countUnhealthy() (count int) {
+func (b *GRPC17Health) countUnhealthy() (count int) {
b.unhealthyMu.RLock()
count = len(b.unhealthyHostPorts)
b.unhealthyMu.RUnlock()
return count
}
-func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) {
+func (b *GRPC17Health) isUnhealthy(hostPort string) (unhealthy bool) {
b.unhealthyMu.RLock()
_, unhealthy = b.unhealthyHostPorts[hostPort]
b.unhealthyMu.RUnlock()
return unhealthy
}
-func (b *healthBalancer) cleanupUnhealthy() {
+func (b *GRPC17Health) cleanupUnhealthy() {
b.unhealthyMu.Lock()
for k, v := range b.unhealthyHostPorts {
if time.Since(v) > b.healthCheckTimeout {
delete(b.unhealthyHostPorts, k)
- logger.Lvl(4).Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
+ lg.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout)
}
}
b.unhealthyMu.Unlock()
}
-func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) {
+func (b *GRPC17Health) liveAddrs() ([]grpc.Address, map[string]struct{}) {
unhealthyCnt := b.countUnhealthy()
b.mu.RLock()
@@ -231,15 +245,15 @@
return addrs, liveHostPorts
}
-func (b *healthBalancer) updateUnhealthy() {
+func (b *GRPC17Health) updateUnhealthy() {
for {
select {
case <-time.After(b.healthCheckTimeout):
b.cleanupUnhealthy()
- pinned := b.pinned()
+ pinned := b.Pinned()
if pinned == "" || b.isUnhealthy(pinned) {
select {
- case b.updateAddrsC <- notifyNext:
+ case b.updateAddrsC <- NotifyNext:
case <-b.stopc:
return
}
@@ -250,7 +264,19 @@
}
}
-func (b *healthBalancer) updateAddrs(eps ...string) {
+// NeedUpdate returns true if all connections are down or
+// addresses do not include current pinned address.
+func (b *GRPC17Health) NeedUpdate() bool {
+ // updating notifyCh can trigger new connections,
+ // need update addrs if all connections are down
+ // or addrs does not include pinAddr.
+ b.mu.RLock()
+ update := !hasAddr(b.addrs, b.pinAddr)
+ b.mu.RUnlock()
+ return update
+}
+
+func (b *GRPC17Health) UpdateAddrs(eps ...string) {
np := getHostPort2ep(eps)
b.mu.Lock()
@@ -278,12 +304,12 @@
b.unhealthyMu.Unlock()
}
-func (b *healthBalancer) next() {
+func (b *GRPC17Health) Next() {
b.mu.RLock()
downc := b.downc
b.mu.RUnlock()
select {
- case b.updateAddrsC <- notifyNext:
+ case b.updateAddrsC <- NotifyNext:
case <-b.stopc:
}
// wait until disconnect so new RPCs are not issued on old connection
@@ -293,7 +319,7 @@
}
}
-func (b *healthBalancer) updateNotifyLoop() {
+func (b *GRPC17Health) updateNotifyLoop() {
defer close(b.donec)
for {
@@ -320,7 +346,7 @@
default:
}
case downc == nil:
- b.notifyAddrs(notifyReset)
+ b.notifyAddrs(NotifyReset)
select {
case <-upc:
case msg := <-b.updateAddrsC:
@@ -338,7 +364,7 @@
}
select {
case <-downc:
- b.notifyAddrs(notifyReset)
+ b.notifyAddrs(NotifyReset)
case msg := <-b.updateAddrsC:
b.notifyAddrs(msg)
case <-b.stopc:
@@ -348,8 +374,8 @@
}
}
-func (b *healthBalancer) notifyAddrs(msg notifyMsg) {
- if msg == notifyNext {
+func (b *GRPC17Health) notifyAddrs(msg NotifyMsg) {
+ if msg == NotifyNext {
select {
case b.notifyCh <- []grpc.Address{}:
case <-b.stopc:
@@ -380,7 +406,7 @@
}
}
-func (b *healthBalancer) Up(addr grpc.Address) func(error) {
+func (b *GRPC17Health) Up(addr grpc.Address) func(error) {
if !b.mayPin(addr) {
return func(err error) {}
}
@@ -402,7 +428,7 @@
}
if b.pinAddr != "" {
- logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
+ lg.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr)
return func(err error) {}
}
@@ -410,7 +436,7 @@
close(b.upc)
b.downc = make(chan struct{})
b.pinAddr = addr.Addr
- logger.Lvl(4).Infof("clientv3/balancer: pin %q", addr.Addr)
+ lg.Infof("clientv3/balancer: pin %q", addr.Addr)
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
@@ -420,19 +446,19 @@
// timeout will induce a network I/O error, and retrying until success;
// finding healthy endpoint on retry could take several timeouts and redials.
// To avoid wasting retries, gray-list unhealthy endpoints.
- b.hostPortError(addr.Addr, err)
+ b.HostPortError(addr.Addr, err)
b.mu.Lock()
b.upc = make(chan struct{})
close(b.downc)
b.pinAddr = ""
b.mu.Unlock()
- logger.Lvl(4).Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
+ lg.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error())
}
}
-func (b *healthBalancer) mayPin(addr grpc.Address) bool {
- if b.endpoint(addr.Addr) == "" { // stale host:port
+func (b *GRPC17Health) mayPin(addr grpc.Address) bool {
+ if b.Endpoint(addr.Addr) == "" { // stale host:port
return false
}
@@ -454,7 +480,7 @@
// 3. grpc-healthcheck still SERVING, thus retry to pin
// instead, return before grpc-healthcheck if failed within healthcheck timeout
if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout {
- logger.Lvl(4).Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
+ lg.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout)
return false
}
@@ -463,11 +489,11 @@
return true
}
- b.hostPortError(addr.Addr, errors.New("health check failed"))
+ b.HostPortError(addr.Addr, errors.New("health check failed"))
return false
}
-func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
+func (b *GRPC17Health) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var (
addr string
closed bool
@@ -515,9 +541,9 @@
return grpc.Address{Addr: addr}, func() {}, nil
}
-func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
+func (b *GRPC17Health) Notify() <-chan []grpc.Address { return b.notifyCh }
-func (b *healthBalancer) Close() error {
+func (b *GRPC17Health) Close() error {
b.mu.Lock()
// In case gRPC calls close twice. TODO: remove the checking
// when we are sure that gRPC wont call close twice.
@@ -553,8 +579,8 @@
return nil
}
-func grpcHealthCheck(client *Client, ep string) (bool, error) {
- conn, err := client.dial(ep)
+func grpcHealthCheck(ep string, dialFunc func(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error)) (bool, error) {
+ conn, err := dialFunc(ep)
if err != nil {
return false, err
}
@@ -607,3 +633,25 @@
}
return hm
}
+
+func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
+ proto = "tcp"
+ host = endpoint
+ url, uerr := url.Parse(endpoint)
+ if uerr != nil || !strings.Contains(endpoint, "://") {
+ return proto, host, scheme
+ }
+ scheme = url.Scheme
+
+ // strip scheme:// prefix since grpc dials by host
+ host = url.Host
+ switch url.Scheme {
+ case "http", "https":
+ case "unix", "unixs":
+ proto = "unix"
+ host = url.Host + url.Path
+ default:
+ proto, host = "", ""
+ }
+ return proto, host, scheme
+}
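
A minimal standalone sketch of how parseEndpoint (added above) maps endpoint strings onto the (proto, host, scheme) triple used by the gRPC dialer. The sample endpoints are illustrative, and the helper is reproduced locally so the example compiles on its own; it is not part of the diff.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// Same logic as the vendored parseEndpoint above, copied so this sketch is
// self-contained.
func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
	proto = "tcp"
	host = endpoint
	u, err := url.Parse(endpoint)
	if err != nil || !strings.Contains(endpoint, "://") {
		return proto, host, scheme
	}
	scheme = u.Scheme
	host = u.Host
	switch u.Scheme {
	case "http", "https":
	case "unix", "unixs":
		proto = "unix"
		host = u.Host + u.Path
	default:
		proto, host = "", ""
	}
	return proto, host, scheme
}

func main() {
	for _, ep := range []string{
		"10.0.0.1:2379",             // no scheme: dial tcp, host unchanged
		"https://etcd.example:2379", // https: dial tcp, scheme kept for TLS creds
		"unix:///tmp/etcd.sock",     // unix socket: proto unix, host is the path
		"ftp://bad",                 // unsupported scheme: proto and host cleared
	} {
		proto, host, scheme := parseEndpoint(ep)
		fmt.Printf("%-28s -> proto=%q host=%q scheme=%q\n", ep, proto, host, scheme)
	}
}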
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go
new file mode 100644
index 0000000..35dabf5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package picker defines/implements client balancer picker policy.
+package picker
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
new file mode 100644
index 0000000..c70ce15
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+ "context"
+
+ "google.golang.org/grpc/balancer"
+)
+
+// NewErr returns a picker that always returns err on "Pick".
+func NewErr(err error) Picker {
+ return &errPicker{err: err}
+}
+
+type errPicker struct {
+ err error
+}
+
+func (p *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ return nil, nil, p.err
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
new file mode 100644
index 0000000..7ea761b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+ "google.golang.org/grpc/balancer"
+)
+
+// Picker defines balancer Picker methods.
+type Picker interface {
+ balancer.Picker
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker_policy.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker_policy.go
new file mode 100644
index 0000000..463ddc2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/picker_policy.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import "fmt"
+
+// Policy defines balancer picker policy.
+type Policy uint8
+
+const (
+ // TODO: custom picker is not supported yet.
+ // custom defines custom balancer picker.
+ custom Policy = iota
+
+ // RoundrobinBalanced balances load over multiple endpoints
+ // and implements failover in roundrobin fashion.
+ RoundrobinBalanced Policy = iota
+
+ // TODO: only send loads to pinned address "RoundrobinFailover"
+ // just like how 3.3 client works
+ //
+ // TODO: prioritize leader
+ // TODO: health-check
+ // TODO: weighted roundrobin
+ // TODO: power of two random choice
+)
+
+func (p Policy) String() string {
+ switch p {
+ case custom:
+ panic("'custom' picker policy is not supported yet")
+ case RoundrobinBalanced:
+ return "etcd-client-roundrobin-balanced"
+ default:
+ panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
+ }
+}
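
A small sketch (not part of the diff) of how the picker Policy is consumed: RoundrobinBalanced.String() yields the policy name, and client.go later in this diff derives its registered balancer name by prefixing it with "etcd-".

package main

import (
	"fmt"

	"go.etcd.io/etcd/clientv3/balancer/picker"
)

func main() {
	// Prints "etcd-client-roundrobin-balanced".
	fmt.Println(picker.RoundrobinBalanced.String())

	// client.go (further down in this diff) builds its balancer name the same
	// way, by prefixing the policy name with "etcd-".
	name := fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
	fmt.Println(name)
}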
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
new file mode 100644
index 0000000..b043d57
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+ "context"
+ "sync"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+// NewRoundrobinBalanced returns a new roundrobin balanced picker.
+func NewRoundrobinBalanced(
+ lg *zap.Logger,
+ scs []balancer.SubConn,
+ addrToSc map[resolver.Address]balancer.SubConn,
+ scToAddr map[balancer.SubConn]resolver.Address,
+) Picker {
+ return &rrBalanced{
+ lg: lg,
+ scs: scs,
+ addrToSc: addrToSc,
+ scToAddr: scToAddr,
+ }
+}
+
+type rrBalanced struct {
+ lg *zap.Logger
+
+ mu sync.RWMutex
+ next int
+ scs []balancer.SubConn
+
+ addrToSc map[resolver.Address]balancer.SubConn
+ scToAddr map[balancer.SubConn]resolver.Address
+}
+
+// Pick is called for every client request.
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ rb.mu.RLock()
+ n := len(rb.scs)
+ rb.mu.RUnlock()
+ if n == 0 {
+ return nil, nil, balancer.ErrNoSubConnAvailable
+ }
+
+ rb.mu.Lock()
+ cur := rb.next
+ sc := rb.scs[cur]
+ picked := rb.scToAddr[sc].Addr
+ rb.next = (rb.next + 1) % len(rb.scs)
+ rb.mu.Unlock()
+
+ rb.lg.Debug(
+ "picked",
+ zap.String("address", picked),
+ zap.Int("subconn-index", cur),
+ zap.Int("subconn-size", n),
+ )
+
+ doneFunc := func(info balancer.DoneInfo) {
+ // TODO: error handling?
+ fss := []zapcore.Field{
+ zap.Error(info.Err),
+ zap.String("address", picked),
+ zap.Bool("success", info.Err == nil),
+ zap.Bool("bytes-sent", info.BytesSent),
+ zap.Bool("bytes-received", info.BytesReceived),
+ }
+ if info.Err == nil {
+ rb.lg.Debug("balancer done", fss...)
+ } else {
+ rb.lg.Warn("balancer failed", fss...)
+ }
+ }
+ return sc, doneFunc, nil
+}
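
The Pick method above rotates through the ready SubConns with a counter taken modulo their count. A standalone sketch of just that index rotation (not part of the diff):

package main

import "fmt"

// rotation mirrors the counter arithmetic in rrBalanced.Pick: each call
// returns the current index and advances it modulo the number of ready
// SubConns.
type rotation struct {
	next int
}

func (r *rotation) pick(n int) int {
	cur := r.next
	r.next = (r.next + 1) % n
	return cur
}

func main() {
	r := &rotation{}
	// With three ready SubConns the picker cycles 0, 1, 2, 0, 1, ...
	for i := 0; i < 5; i++ {
		fmt.Println(r.pick(3))
	}
}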
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
new file mode 100644
index 0000000..1f32039
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
@@ -0,0 +1,240 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
+package endpoint
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+ "sync"
+
+ "google.golang.org/grpc/resolver"
+)
+
+const scheme = "endpoint"
+
+var (
+ targetPrefix = fmt.Sprintf("%s://", scheme)
+
+ bldr *builder
+)
+
+func init() {
+ bldr = &builder{
+ resolverGroups: make(map[string]*ResolverGroup),
+ }
+ resolver.Register(bldr)
+}
+
+type builder struct {
+ mu sync.RWMutex
+ resolverGroups map[string]*ResolverGroup
+}
+
+// NewResolverGroup creates a new ResolverGroup with the given id.
+func NewResolverGroup(id string) (*ResolverGroup, error) {
+ return bldr.newResolverGroup(id)
+}
+
+// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
+// up-to-date.
+type ResolverGroup struct {
+ mu sync.RWMutex
+ id string
+ endpoints []string
+ resolvers []*Resolver
+}
+
+func (e *ResolverGroup) addResolver(r *Resolver) {
+ e.mu.Lock()
+ addrs := epsToAddrs(e.endpoints...)
+ e.resolvers = append(e.resolvers, r)
+ e.mu.Unlock()
+ r.cc.NewAddress(addrs)
+}
+
+func (e *ResolverGroup) removeResolver(r *Resolver) {
+ e.mu.Lock()
+ for i, er := range e.resolvers {
+ if er == r {
+ e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
+ break
+ }
+ }
+ e.mu.Unlock()
+}
+
+// SetEndpoints updates the endpoints for ResolverGroup. All registered resolvers are updated
+// immediately with the new endpoints.
+func (e *ResolverGroup) SetEndpoints(endpoints []string) {
+ addrs := epsToAddrs(endpoints...)
+ e.mu.Lock()
+ e.endpoints = endpoints
+ for _, r := range e.resolvers {
+ r.cc.NewAddress(addrs)
+ }
+ e.mu.Unlock()
+}
+
+// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
+func (e *ResolverGroup) Target(endpoint string) string {
+ return Target(e.id, endpoint)
+}
+
+// Target constructs an endpoint resolver target.
+func Target(id, endpoint string) string {
+ return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
+}
+
+// IsTarget checks whether a given target string is an endpoint resolver target.
+func IsTarget(target string) bool {
+ return strings.HasPrefix(target, "endpoint://")
+}
+
+func (e *ResolverGroup) Close() {
+ bldr.close(e.id)
+}
+
+// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+ if len(target.Authority) < 1 {
+ return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
+ }
+ id := target.Authority
+ es, err := b.getResolverGroup(id)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build resolver: %v", err)
+ }
+ r := &Resolver{
+ endpointID: id,
+ cc: cc,
+ }
+ es.addResolver(r)
+ return r, nil
+}
+
+func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
+ b.mu.RLock()
+ _, ok := b.resolverGroups[id]
+ b.mu.RUnlock()
+ if ok {
+ return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
+ }
+
+ es := &ResolverGroup{id: id}
+ b.mu.Lock()
+ b.resolverGroups[id] = es
+ b.mu.Unlock()
+ return es, nil
+}
+
+func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
+ b.mu.RLock()
+ es, ok := b.resolverGroups[id]
+ b.mu.RUnlock()
+ if !ok {
+ return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
+ }
+ return es, nil
+}
+
+func (b *builder) close(id string) {
+ b.mu.Lock()
+ delete(b.resolverGroups, id)
+ b.mu.Unlock()
+}
+
+func (b *builder) Scheme() string {
+ return scheme
+}
+
+// Resolver provides a resolver for a single etcd cluster, identified by name.
+type Resolver struct {
+ endpointID string
+ cc resolver.ClientConn
+ sync.RWMutex
+}
+
+// TODO: use balancer.epsToAddrs
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+ addrs = make([]resolver.Address, 0, len(eps))
+ for _, ep := range eps {
+ addrs = append(addrs, resolver.Address{Addr: ep})
+ }
+ return addrs
+}
+
+func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (r *Resolver) Close() {
+ es, err := bldr.getResolverGroup(r.endpointID)
+ if err != nil {
+ return
+ }
+ es.removeResolver(r)
+}
+
+// ParseEndpoint parses an endpoint of the form
+// (http|https)://<host>*|(unix|unixs)://<path>)
+// and returns a protocol ('tcp' or 'unix'),
+// host (or filepath if a unix socket),
+// scheme (http, https, unix, unixs).
+func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
+ proto = "tcp"
+ host = endpoint
+ url, uerr := url.Parse(endpoint)
+ if uerr != nil || !strings.Contains(endpoint, "://") {
+ return proto, host, scheme
+ }
+ scheme = url.Scheme
+
+ // strip scheme:// prefix since grpc dials by host
+ host = url.Host
+ switch url.Scheme {
+ case "http", "https":
+ case "unix", "unixs":
+ proto = "unix"
+ host = url.Host + url.Path
+ default:
+ proto, host = "", ""
+ }
+ return proto, host, scheme
+}
+
+// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
+// If the target is malformed, an error is returned.
+func ParseTarget(target string) (string, string, error) {
+ noPrefix := strings.TrimPrefix(target, targetPrefix)
+ if noPrefix == target {
+ return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
+ }
+ parts := strings.SplitN(noPrefix, "/", 2)
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
+ }
+ return parts[0], parts[1], nil
+}
+
+// ParseHostPort splits a "<host>:<port>" string into the host and port parts.
+// The port part is optional.
+func ParseHostPort(hostPort string) (host string, port string) {
+ parts := strings.SplitN(hostPort, ":", 2)
+ host = parts[0]
+ if len(parts) > 1 {
+ port = parts[1]
+ }
+ return host, port
+}
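
A minimal usage sketch (not part of the diff) of the exported helpers in the new endpoint resolver package. The id "client-1234" and the endpoint URL are illustrative values only.

package main

import (
	"fmt"

	"go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	// Build the endpoint://<id>/<endpoint> target that the client dials
	// through its resolver group, then parse it back.
	target := endpoint.Target("client-1234", "https://etcd.example:2379")
	fmt.Println(target) // endpoint://client-1234/https://etcd.example:2379

	id, ep, err := endpoint.ParseTarget(target)
	if err != nil {
		panic(err)
	}
	fmt.Println(id, ep) // client-1234 https://etcd.example:2379

	// Split the endpoint into the pieces the dialer and TLS config need.
	proto, hostPort, scheme := endpoint.ParseEndpoint(ep)
	host, port := endpoint.ParseHostPort(hostPort)
	fmt.Println(proto, host, port, scheme) // tcp etcd.example 2379 https
}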
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
new file mode 100644
index 0000000..a11faeb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/utils.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import (
+ "fmt"
+ "net/url"
+ "sort"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+func scToString(sc balancer.SubConn) string {
+ return fmt.Sprintf("%p", sc)
+}
+
+func scsToStrings(scs map[resolver.Address]balancer.SubConn) (ss []string) {
+ ss = make([]string, 0, len(scs))
+ for a, sc := range scs {
+ ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
+ }
+ sort.Strings(ss)
+ return ss
+}
+
+func addrsToStrings(addrs []resolver.Address) (ss []string) {
+ ss = make([]string, len(addrs))
+ for i := range addrs {
+ ss[i] = addrs[i].Addr
+ }
+ sort.Strings(ss)
+ return ss
+}
+
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+ addrs = make([]resolver.Address, 0, len(eps))
+ for _, ep := range eps {
+ u, err := url.Parse(ep)
+ if err != nil {
+ addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
+ continue
+ }
+ addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
+ }
+ return addrs
+}
+
+var genN = new(uint32)
+
+func genName() string {
+ now := time.Now().UnixNano()
+ return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go
index 7132807..276b0f0 100644
--- a/vendor/go.etcd.io/etcd/clientv3/client.go
+++ b/vendor/go.etcd.io/etcd/clientv3/client.go
@@ -21,13 +21,18 @@
"fmt"
"net"
"net/url"
+ "os"
"strconv"
"strings"
"sync"
"time"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-
+ "github.com/google/uuid"
+ "go.etcd.io/etcd/clientv3/balancer"
+ "go.etcd.io/etcd/clientv3/balancer/picker"
+ "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ "go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -39,8 +44,26 @@
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
ErrOldCluster = errors.New("etcdclient: old cluster version")
+
+ roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
)
+func init() {
+ lg := zap.NewNop()
+ if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+ var err error
+ lg, err = zap.NewProductionConfig().Build() // info level logging
+ if err != nil {
+ panic(err)
+ }
+ }
+ balancer.RegisterBuilder(balancer.Config{
+ Policy: picker.RoundrobinBalanced,
+ Name: roundRobinBalancerName,
+ Logger: lg,
+ })
+}
+
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
@@ -50,13 +73,13 @@
Auth
Maintenance
- conn *grpc.ClientConn
- dialerrc chan error
+ conn *grpc.ClientConn
- cfg Config
- creds *credentials.TransportCredentials
- balancer *healthBalancer
- mu *sync.Mutex
+ cfg Config
+ creds *credentials.TransportCredentials
+ balancer balancer.Balancer
+ resolverGroup *endpoint.ResolverGroup
+ mu *sync.Mutex
ctx context.Context
cancel context.CancelFunc
@@ -69,6 +92,8 @@
tokenCred *authTokenCredential
callOpts []grpc.CallOption
+
+ lg *zap.Logger
}
// New creates a new etcdv3 client from a given configuration.
@@ -93,11 +118,19 @@
return New(Config{Endpoints: []string{url}})
}
+// NewFromURLs creates a new etcdv3 client from URLs.
+func NewFromURLs(urls []string) (*Client, error) {
+ return New(Config{Endpoints: urls})
+}
+
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
c.Watcher.Close()
c.Lease.Close()
+ if c.resolverGroup != nil {
+ c.resolverGroup.Close()
+ }
if c.conn != nil {
return toErr(c.ctx, c.conn.Close())
}
@@ -120,22 +153,9 @@
// SetEndpoints updates client's endpoints.
func (c *Client) SetEndpoints(eps ...string) {
c.mu.Lock()
+ defer c.mu.Unlock()
c.cfg.Endpoints = eps
- c.mu.Unlock()
- c.balancer.updateAddrs(eps...)
-
- // updating notifyCh can trigger new connections,
- // need update addrs if all connections are down
- // or addrs does not include pinAddr.
- c.balancer.mu.RLock()
- update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr)
- c.balancer.mu.RUnlock()
- if update {
- select {
- case c.balancer.updateAddrsC <- notifyNext:
- case <-c.balancer.stopc:
- }
- }
+ c.resolverGroup.SetEndpoints(eps)
}
// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
@@ -166,7 +186,7 @@
err := c.Sync(ctx)
cancel()
if err != nil && err != c.ctx.Err() {
- logger.Println("Auto sync endpoints failed:", err)
+ lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
}
}
}
@@ -185,32 +205,10 @@
cred.tokenMu.RLock()
defer cred.tokenMu.RUnlock()
return map[string]string{
- "token": cred.token,
+ rpctypes.TokenFieldNameGRPC: cred.token,
}, nil
}
-func parseEndpoint(endpoint string) (proto string, host string, scheme string) {
- proto = "tcp"
- host = endpoint
- url, uerr := url.Parse(endpoint)
- if uerr != nil || !strings.Contains(endpoint, "://") {
- return proto, host, scheme
- }
- scheme = url.Scheme
-
- // strip scheme:// prefix since grpc dials by host
- host = url.Host
- switch url.Scheme {
- case "http", "https":
- case "unix", "unixs":
- proto = "unix"
- host = url.Host + url.Path
- default:
- proto, host = "", ""
- }
- return proto, host, scheme
-}
-
func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) {
creds = c.creds
switch scheme {
@@ -230,63 +228,60 @@
return creds
}
-// dialSetupOpts gives the dial opts prior to any authentication
-func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
- if c.cfg.DialTimeout > 0 {
- opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
- }
+// dialSetupOpts gives the dial opts prior to any authentication.
+func (c *Client) dialSetupOpts(creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
if c.cfg.DialKeepAliveTime > 0 {
params := keepalive.ClientParameters{
- Time: c.cfg.DialKeepAliveTime,
- Timeout: c.cfg.DialKeepAliveTimeout,
+ Time: c.cfg.DialKeepAliveTime,
+ Timeout: c.cfg.DialKeepAliveTimeout,
+ PermitWithoutStream: c.cfg.PermitWithoutStream,
}
opts = append(opts, grpc.WithKeepaliveParams(params))
}
opts = append(opts, dopts...)
- f := func(host string, t time.Duration) (net.Conn, error) {
- proto, host, _ := parseEndpoint(c.balancer.endpoint(host))
- if host == "" && endpoint != "" {
- // dialing an endpoint not in the balancer; use
- // endpoint passed into dial
- proto, host, _ = parseEndpoint(endpoint)
- }
- if proto == "" {
- return nil, fmt.Errorf("unknown scheme for %q", host)
- }
+ // Provide a net dialer that supports cancelation and timeout.
+ f := func(dialEp string, t time.Duration) (net.Conn, error) {
+ proto, host, _ := endpoint.ParseEndpoint(dialEp)
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
dialer := &net.Dialer{Timeout: t}
- conn, err := dialer.DialContext(c.ctx, proto, host)
- if err != nil {
- select {
- case c.dialerrc <- err:
- default:
- }
- }
- return conn, err
+ return dialer.DialContext(c.ctx, proto, host)
}
opts = append(opts, grpc.WithDialer(f))
- creds := c.creds
- if _, _, scheme := parseEndpoint(endpoint); len(scheme) != 0 {
- creds = c.processCreds(scheme)
- }
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
- return opts
+ // Interceptor retry and backoff.
+ // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
+ // once it is available.
+ rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
+ opts = append(opts,
+ // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
+ // Streams that are safe to retry are enabled individually.
+ grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
+ grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
+ )
+
+ return opts, nil
}
// Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
- return c.dial(endpoint)
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+ creds := c.directDialCreds(ep)
+ // Use the grpc passthrough resolver to directly dial a single endpoint.
+ // This resolver passes through the 'unix' and 'unixs' endpoint schemes used
+ // by etcd without modification, allowing us to dial endpoints directly and
+ // use the same dial functions that we use for load balancer dialing.
+ return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
}
func (c *Client) getToken(ctx context.Context) error {
@@ -294,10 +289,19 @@
var auth *authenticator
for i := 0; i < len(c.cfg.Endpoints); i++ {
- endpoint := c.cfg.Endpoints[i]
- host := getHost(endpoint)
+ ep := c.cfg.Endpoints[i]
// use dial options without dopts to avoid reusing the client balancer
- auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c)
+ var dOpts []grpc.DialOption
+ _, host, _ := endpoint.ParseEndpoint(ep)
+ target := c.resolverGroup.Target(host)
+ creds := c.dialWithBalancerCreds(ep)
+ dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
+ if err != nil {
+ err = fmt.Errorf("failed to configure auth dialer: %v", err)
+ continue
+ }
+ dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
+ auth, err = newAuthenticator(ctx, target, dOpts, c)
if err != nil {
continue
}
@@ -306,6 +310,10 @@
var resp *AuthenticateResponse
resp, err = auth.authenticate(ctx, c.Username, c.Password)
if err != nil {
+ // return err without retrying other endpoints
+ if err == rpctypes.ErrAuthNotEnabled {
+ return err
+ }
continue
}
@@ -319,43 +327,91 @@
return err
}
-func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- opts := c.dialSetupOpts(endpoint, dopts...)
- host := getHost(endpoint)
+// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
+// of the provided endpoint determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ _, host, _ := endpoint.ParseEndpoint(ep)
+ target := c.resolverGroup.Target(host)
+ creds := c.dialWithBalancerCreds(ep)
+ return c.dial(target, creds, dopts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(target string, creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ opts, err := c.dialSetupOpts(creds, dopts...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to configure dialer: %v", err)
+ }
+
if c.Username != "" && c.Password != "" {
c.tokenCred = &authTokenCredential{
tokenMu: &sync.RWMutex{},
}
- ctx := c.ctx
+ ctx, cancel := c.ctx, func() {}
if c.cfg.DialTimeout > 0 {
- cctx, cancel := context.WithTimeout(ctx, c.cfg.DialTimeout)
- defer cancel()
- ctx = cctx
+ ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
}
- err := c.getToken(ctx)
+ err = c.getToken(ctx)
if err != nil {
if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
err = context.DeadlineExceeded
}
+ cancel()
return nil, err
}
} else {
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
}
+ cancel()
}
opts = append(opts, c.cfg.DialOptions...)
- conn, err := grpc.DialContext(c.ctx, host, opts...)
+ dctx := c.ctx
+ if c.cfg.DialTimeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+ defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+ }
+
+ conn, err := grpc.DialContext(dctx, target, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
+func (c *Client) directDialCreds(ep string) *credentials.TransportCredentials {
+ _, hostPort, scheme := endpoint.ParseEndpoint(ep)
+ creds := c.creds
+ if len(scheme) != 0 {
+ creds = c.processCreds(scheme)
+ if creds != nil {
+ c := *creds
+ clone := c.Clone()
+ // The server name must be set to the endpoint hostname without the port, since grpc
+ // otherwise attempts to check whether the x509 cert is valid for the full endpoint
+ // including the scheme and port, which fails.
+ host, _ := endpoint.ParseHostPort(hostPort)
+ clone.OverrideServerName(host)
+ creds = &clone
+ }
+ }
+ return creds
+}
+
+func (c *Client) dialWithBalancerCreds(ep string) *credentials.TransportCredentials {
+ _, _, scheme := endpoint.ParseEndpoint(ep)
+ creds := c.creds
+ if len(scheme) != 0 {
+ creds = c.processCreds(scheme)
+ }
+ return creds
+}
+
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
@@ -382,7 +438,6 @@
ctx, cancel := context.WithCancel(baseCtx)
client := &Client{
conn: nil,
- dialerrc: make(chan error, 1),
cfg: *cfg,
creds: creds,
ctx: ctx,
@@ -390,6 +445,17 @@
mu: new(sync.Mutex),
callOpts: defaultCallOpts,
}
+
+ lcfg := DefaultLogConfig
+ if cfg.LogConfig != nil {
+ lcfg = *cfg.LogConfig
+ }
+ var err error
+ client.lg, err = lcfg.Build()
+ if err != nil {
+ return nil, err
+ }
+
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
@@ -412,42 +478,31 @@
client.callOpts = callOpts
}
- client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) {
- return grpcHealthCheck(client, ep)
- })
-
- // use Endpoints[0] so that for https:// without any tls config given, then
- // grpc will assume the certificate server name is the endpoint host.
- conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
+ // Prepare an 'endpoint://<unique-client-id>/' resolver for the client and create an endpoint target to pass
+ // to dial so the client knows to use this resolver.
+ client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
if err != nil {
client.cancel()
- client.balancer.Close()
return nil, err
}
- client.conn = conn
+ client.resolverGroup.SetEndpoints(cfg.Endpoints)
- // wait for a connection
- if cfg.DialTimeout > 0 {
- hasConn := false
- waitc := time.After(cfg.DialTimeout)
- select {
- case <-client.balancer.ready():
- hasConn = true
- case <-ctx.Done():
- case <-waitc:
- }
- if !hasConn {
- err := context.DeadlineExceeded
- select {
- case err = <-client.dialerrc:
- default:
- }
- client.cancel()
- client.balancer.Close()
- conn.Close()
- return nil, err
- }
+ if len(cfg.Endpoints) < 1 {
+ return nil, fmt.Errorf("at least one Endpoint must is required in client config")
}
+ dialEndpoint := cfg.Endpoints[0]
+
+ // Use a provided endpoint target so that, for https:// endpoints without any tls config given,
+ // grpc will assume the certificate server name is the endpoint host.
+ conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
+ if err != nil {
+ client.cancel()
+ client.resolverGroup.Close()
+ return nil, err
+ }
+ // TODO: With the old grpc balancer interface, we waited until the dial timeout
+ // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
+ client.conn = conn
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
@@ -467,6 +522,22 @@
return client, nil
}
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+ return func(attempt uint) time.Duration {
+ // after each round robin across quorum, backoff for our wait between duration
+ n := uint(len(c.Endpoints()))
+ quorum := (n/2 + 1)
+ if attempt%quorum == 0 {
+ c.lg.Info("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+ return jitterUp(waitBetween, jitterFraction)
+ }
+ c.lg.Info("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+ return 0
+ }
+}
+
func (c *Client) checkVersion() (err error) {
var wg sync.WaitGroup
errc := make(chan error, len(c.cfg.Endpoints))
@@ -551,18 +622,19 @@
if _, ok := err.(rpctypes.EtcdError); ok {
return err
}
- ev, _ := status.FromError(err)
- code := ev.Code()
- switch code {
- case codes.DeadlineExceeded:
- fallthrough
- case codes.Canceled:
- if ctx.Err() != nil {
- err = ctx.Err()
+ if ev, ok := status.FromError(err); ok {
+ code := ev.Code()
+ switch code {
+ case codes.DeadlineExceeded:
+ fallthrough
+ case codes.Canceled:
+ if ctx.Err() != nil {
+ err = ctx.Err()
+ }
+ case codes.Unavailable:
+ case codes.FailedPrecondition:
+ err = grpc.ErrClientConnClosing
}
- case codes.Unavailable:
- case codes.FailedPrecondition:
- err = grpc.ErrClientConnClosing
}
return err
}
@@ -574,3 +646,31 @@
return err == context.Canceled || err == context.DeadlineExceeded
}
+
+// IsConnCanceled returns true, if error is from a closed gRPC connection.
+// ref. https://github.com/grpc/grpc-go/pull/1854
+func IsConnCanceled(err error) bool {
+ if err == nil {
+ return false
+ }
+ // >= gRPC v1.10.x
+ s, ok := status.FromError(err)
+ if ok {
+ // connection is canceled or server has already closed the connection
+ return s.Code() == codes.Canceled || s.Message() == "transport is closing"
+ }
+ // >= gRPC v1.10.x
+ if err == context.Canceled {
+ return true
+ }
+ // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
+ return strings.Contains(err.Error(), "grpc: the client connection is closing")
+}
+
+func getHost(ep string) string {
+ url, uerr := url.Parse(ep)
+ if uerr != nil || !strings.Contains(ep, "://") {
+ return ep
+ }
+ return url.Host
+}
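
The roundRobinQuorumBackoff helper above only sleeps once a quorum of endpoints has been tried, so retries sweep the cluster quickly before backing off. A standalone sketch of that cadence (not part of the diff; the 25ms wait is illustrative and the jitter is omitted):

package main

import (
	"fmt"
	"time"
)

// backoffFor mirrors the attempt/quorum arithmetic in roundRobinQuorumBackoff:
// with n endpoints, quorum is n/2+1, and only every quorum-th attempt sleeps.
func backoffFor(attempt uint, numEndpoints int, waitBetween time.Duration) time.Duration {
	quorum := uint(numEndpoints/2 + 1)
	if attempt%quorum == 0 {
		return waitBetween // jitter omitted for clarity
	}
	return 0
}

func main() {
	// With 3 endpoints quorum is 2, so attempts 0, 2, 4, ... back off while
	// the attempts in between retry the next endpoint immediately.
	for attempt := uint(0); attempt < 6; attempt++ {
		fmt.Printf("attempt %d -> backoff %v\n", attempt, backoffFor(attempt, 3, 25*time.Millisecond))
	}
}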
diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go
index 785672b..d497c05 100644
--- a/vendor/go.etcd.io/etcd/clientv3/cluster.go
+++ b/vendor/go.etcd.io/etcd/clientv3/cluster.go
@@ -17,8 +17,8 @@
import (
"context"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
"google.golang.org/grpc"
)
diff --git a/vendor/go.etcd.io/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go
index 41e80c1..5779713 100644
--- a/vendor/go.etcd.io/etcd/clientv3/compact_op.go
+++ b/vendor/go.etcd.io/etcd/clientv3/compact_op.go
@@ -15,7 +15,7 @@
package clientv3
import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
diff --git a/vendor/go.etcd.io/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go
index b5f0a25..01ed68e 100644
--- a/vendor/go.etcd.io/etcd/clientv3/compare.go
+++ b/vendor/go.etcd.io/etcd/clientv3/compare.go
@@ -15,7 +15,7 @@
package clientv3
import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
)
type CompareTarget int
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
new file mode 100644
index 0000000..dcdbf51
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package concurrency implements concurrency operations on top of
+// etcd such as distributed locks, barriers, and elections.
+package concurrency
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
new file mode 100644
index 0000000..2521db6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ v3 "go.etcd.io/etcd/clientv3"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+var (
+ ErrElectionNotLeader = errors.New("election: not leader")
+ ErrElectionNoLeader = errors.New("election: no leader")
+)
+
+type Election struct {
+ session *Session
+
+ keyPrefix string
+
+ leaderKey string
+ leaderRev int64
+ leaderSession *Session
+ hdr *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+ return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+ return &Election{
+ keyPrefix: pfx,
+ session: s,
+ leaderKey: leaderKey,
+ leaderRev: leaderRev,
+ leaderSession: s,
+ }
+}
+
+// Campaign puts a value as eligible for the election on the prefix
+// key.
+// Multiple sessions can participate in the election for the
+// same prefix, but only one can be the leader at a time.
+//
+// If the context is 'context.TODO()/context.Background()', the Campaign
+// will continue to be blocked for other keys to be deleted, unless server
+// returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise, Campaign will continue to block until it becomes the leader or
+// until the context is cancelled or times out.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+ s := e.session
+ client := e.session.Client()
+
+ k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+ txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+ txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+ txn = txn.Else(v3.OpGet(k))
+ resp, err := txn.Commit()
+ if err != nil {
+ return err
+ }
+ e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+ if !resp.Succeeded {
+ kv := resp.Responses[0].GetResponseRange().Kvs[0]
+ e.leaderRev = kv.CreateRevision
+ if string(kv.Value) != val {
+ if err = e.Proclaim(ctx, val); err != nil {
+ e.Resign(ctx)
+ return err
+ }
+ }
+ }
+
+ _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+ if err != nil {
+ // clean up in case of context cancel
+ select {
+ case <-ctx.Done():
+ e.Resign(client.Ctx())
+ default:
+ e.leaderSession = nil
+ }
+ return err
+ }
+ e.hdr = resp.Header
+
+ return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+ if e.leaderSession == nil {
+ return ErrElectionNotLeader
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ txn := client.Txn(ctx).If(cmp)
+ txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+ tresp, terr := txn.Commit()
+ if terr != nil {
+ return terr
+ }
+ if !tresp.Succeeded {
+ e.leaderKey = ""
+ return ErrElectionNotLeader
+ }
+
+ e.hdr = tresp.Header
+ return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+ if e.leaderSession == nil {
+ return nil
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+ if err == nil {
+ e.hdr = resp.Header
+ }
+ e.leaderKey = ""
+ e.leaderSession = nil
+ return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+ client := e.session.Client()
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return nil, err
+ } else if len(resp.Kvs) == 0 {
+ // no leader currently elected
+ return nil, ErrElectionNoLeader
+ }
+ return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values on every current elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+ retc := make(chan v3.GetResponse)
+ go e.observe(ctx, retc)
+ return retc
+}
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+ client := e.session.Client()
+
+ defer close(ch)
+ for {
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return
+ }
+
+ var kv *mvccpb.KeyValue
+ var hdr *pb.ResponseHeader
+
+ if len(resp.Kvs) == 0 {
+ cctx, cancel := context.WithCancel(ctx)
+ // wait for first key put on prefix
+ opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+ wch := client.Watch(cctx, e.keyPrefix, opts...)
+ for kv == nil {
+ wr, ok := <-wch
+ if !ok || wr.Err() != nil {
+ cancel()
+ return
+ }
+ // only accept puts; a delete will make observe() spin
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.PUT {
+ hdr, kv = &wr.Header, ev.Kv
+ // may have multiple revs; hdr.rev = the last rev
+ // set to kv's rev in case batch has multiple Puts
+ hdr.Revision = kv.ModRevision
+ break
+ }
+ }
+ }
+ cancel()
+ } else {
+ hdr, kv = resp.Header, resp.Kvs[0]
+ }
+
+ select {
+ case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+ case <-ctx.Done():
+ return
+ }
+
+ cctx, cancel := context.WithCancel(ctx)
+ wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+ keyDeleted := false
+ for !keyDeleted {
+ wr, ok := <-wch
+ if !ok {
+ cancel()
+ return
+ }
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ keyDeleted = true
+ break
+ }
+ resp.Header = &wr.Header
+ resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+ select {
+ case ch <- *resp:
+ case <-cctx.Done():
+ cancel()
+ return
+ }
+ }
+ }
+ cancel()
+ }
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
new file mode 100644
index 0000000..e4cf775
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "fmt"
+
+ v3 "go.etcd.io/etcd/clientv3"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var wr v3.WatchResponse
+ wch := client.Watch(cctx, key, v3.WithRev(rev))
+ for wr = range wch {
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ return nil
+ }
+ }
+ }
+ if err := wr.Err(); err != nil {
+ return err
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and no greater
+// than the create revision are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+ getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+ for {
+ resp, err := client.Get(ctx, pfx, getOpts...)
+ if err != nil {
+ return nil, err
+ }
+ if len(resp.Kvs) == 0 {
+ return resp.Header, nil
+ }
+ lastKey := string(resp.Kvs[0].Key)
+ if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+ return nil, err
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
new file mode 100644
index 0000000..0135341
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ v3 "go.etcd.io/etcd/clientv3"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+// Mutex implements the sync Locker interface with etcd
+type Mutex struct {
+ s *Session
+
+ pfx string
+ myKey string
+ myRev int64
+ hdr *pb.ResponseHeader
+}
+
+func NewMutex(s *Session, pfx string) *Mutex {
+ return &Mutex{s, pfx + "/", "", -1, nil}
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+ s := m.s
+ client := m.s.Client()
+
+ m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
+ cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
+ // put self in lock waiters via myKey; oldest waiter holds lock
+ put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
+ // reuse key in case this session already holds the lock
+ get := v3.OpGet(m.myKey)
+ // fetch current holder to complete uncontended path with only one RPC
+ getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+ resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+ if err != nil {
+ return err
+ }
+ m.myRev = resp.Header.Revision
+ if !resp.Succeeded {
+ m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+ }
+ // if there is no key on the prefix or our key has the minimum create revision, we already hold the lock
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+
+ // wait for deletion revisions prior to myKey
+ hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+ // release lock key if wait failed
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ } else {
+ m.hdr = hdr
+ }
+ return werr
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+ client := m.s.Client()
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+ return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+func (lm *lockerMutex) Unlock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+ return &lockerMutex{NewMutex(s, pfx)}
+}
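
A minimal usage sketch (not part of the diff) of the vendored Mutex: a Session ties the lock key to a lease, Lock blocks until all older waiters under the prefix have released, and Unlock deletes the key. The endpoint and lock prefix are illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	// Endpoint is illustrative; point this at your cluster.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/locks/my-resource")
	ctx := context.Background()

	// Lock blocks until every older waiter under the prefix has released.
	if err := m.Lock(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println("holding lock, key:", m.Key())

	// ... critical section ...

	if err := m.Unlock(ctx); err != nil {
		log.Fatal(err)
	}
}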
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
new file mode 100644
index 0000000..598ec0e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "time"
+
+ v3 "go.etcd.io/etcd/clientv3"
+)
+
+const defaultSessionTTL = 60
+
+// Session represents a lease kept alive for the lifetime of a client.
+// Fault-tolerant applications may use sessions to reason about liveness.
+type Session struct {
+ client *v3.Client
+ opts *sessionOptions
+ id v3.LeaseID
+
+ cancel context.CancelFunc
+ donec <-chan struct{}
+}
+
+// NewSession gets the leased session for a client.
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
+ ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
+ for _, opt := range opts {
+ opt(ops)
+ }
+
+ id := ops.leaseID
+ if id == v3.NoLease {
+ resp, err := client.Grant(ops.ctx, int64(ops.ttl))
+ if err != nil {
+ return nil, err
+ }
+ id = v3.LeaseID(resp.ID)
+ }
+
+ ctx, cancel := context.WithCancel(ops.ctx)
+ keepAlive, err := client.KeepAlive(ctx, id)
+ if err != nil || keepAlive == nil {
+ cancel()
+ return nil, err
+ }
+
+ donec := make(chan struct{})
+ s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
+
+ // keep the lease alive until client error or cancelled context
+ go func() {
+ defer close(donec)
+ for range keepAlive {
+ // eat messages until keep alive channel closes
+ }
+ }()
+
+ return s, nil
+}
+
+// Client is the etcd client that is attached to the session.
+func (s *Session) Client() *v3.Client {
+ return s.client
+}
+
+// Lease is the lease ID for keys bound to the session.
+func (s *Session) Lease() v3.LeaseID { return s.id }
+
+// Done returns a channel that closes when the lease is orphaned, expires, or
+// is otherwise no longer being refreshed.
+func (s *Session) Done() <-chan struct{} { return s.donec }
+
+// Orphan ends the refresh for the session lease. This is useful
+// in case the state of the client connection is indeterminate (revoke
+// would fail) or when transferring lease ownership.
+func (s *Session) Orphan() {
+ s.cancel()
+ <-s.donec
+}
+
+// Close orphans the session and revokes the session lease.
+func (s *Session) Close() error {
+ s.Orphan()
+ // if revoke takes longer than the ttl, lease is expired anyway
+ ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
+ _, err := s.client.Revoke(ctx, s.id)
+ cancel()
+ return err
+}
+
+type sessionOptions struct {
+ ttl int
+ leaseID v3.LeaseID
+ ctx context.Context
+}
+
+// SessionOption configures Session.
+type SessionOption func(*sessionOptions)
+
+// WithTTL configures the session's TTL in seconds.
+// If TTL is <= 0, the default 60 seconds TTL will be used.
+func WithTTL(ttl int) SessionOption {
+ return func(so *sessionOptions) {
+ if ttl > 0 {
+ so.ttl = ttl
+ }
+ }
+}
+
+// WithLease specifies the existing leaseID to be used for the session.
+// This is useful in process restart scenario, for example, to reclaim
+// leadership from an election prior to restart.
+func WithLease(leaseID v3.LeaseID) SessionOption {
+ return func(so *sessionOptions) {
+ so.leaseID = leaseID
+ }
+}
+
+// WithContext assigns a context to the session instead of defaulting to
+// using the client context. This is useful for canceling NewSession and
+// Close operations immediately without having to close the client. If the
+// context is canceled before Close() completes, the session's lease will be
+// abandoned and left to expire instead of being revoked.
+func WithContext(ctx context.Context) SessionOption {
+ return func(so *sessionOptions) {
+ so.ctx = ctx
+ }
+}
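
A minimal usage sketch (not part of the diff) of Session: a lease is granted and kept alive until Close (or Orphan), and Done() reports when refreshing stops. The endpoint and TTL are illustrative.

package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	// Endpoint is illustrative; point this at your cluster.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Grant a 10-second lease and keep it alive for the session's lifetime.
	s, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("session lease:", s.Lease())

	// Close stops the keep-alive and revokes the lease; Done() is closed once
	// the lease is no longer being refreshed.
	if err := s.Close(); err != nil {
		log.Fatal(err)
	}
	<-s.Done()
	fmt.Println("session ended")
}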
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
new file mode 100644
index 0000000..ee11510
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
@@ -0,0 +1,387 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "math"
+
+ v3 "go.etcd.io/etcd/clientv3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+ // Get returns the value for a key and inserts the key in the txn's read set.
+ // If Get fails, it aborts the transaction with an error, never returning.
+ Get(key ...string) string
+ // Put adds a value for a key to the write set.
+ Put(key, val string, opts ...v3.OpOption)
+ // Rev returns the revision of a key in the read set.
+ Rev(key string) int64
+ // Del deletes a key.
+ Del(key string)
+
+ // commit attempts to apply the txn's changes to the server.
+ commit() *v3.TxnResponse
+ reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+ // SerializableSnapshot provides serializable isolation and also checks
+ // for write conflicts.
+ SerializableSnapshot Isolation = iota
+ // Serializable reads within the same transaction attempt return data
+ // from the revision of the first read.
+ Serializable
+ // RepeatableReads reads within the same transaction attempt always
+ // return the same data.
+ RepeatableReads
+ // ReadCommitted reads keys from any committed revision.
+ ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+ iso Isolation
+ ctx context.Context
+ prefetch []string
+}
+
+type stmOption func(*stmOptions)
+
+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+ return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+ return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+ return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+ opts := &stmOptions{ctx: c.Ctx()}
+ for _, f := range so {
+ f(opts)
+ }
+ if len(opts.prefetch) != 0 {
+ f := apply
+ apply = func(s STM) error {
+ s.Get(opts.prefetch...)
+ return f(s)
+ }
+ }
+ return runSTM(mkSTM(c, opts), apply)
+}
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+ switch opts.iso {
+ case SerializableSnapshot:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp {
+ return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+ }
+ return s
+ case Serializable:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+ return s
+ case RepeatableReads:
+ s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+ s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+ return s
+ case ReadCommitted:
+ s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+ s.conflicts = func() []v3.Cmp { return nil }
+ return s
+ default:
+ panic("unsupported stm")
+ }
+}
+
+type stmResponse struct {
+ resp *v3.TxnResponse
+ err error
+}
+
+func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
+ outc := make(chan stmResponse, 1)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ e, ok := r.(stmError)
+ if !ok {
+ // client apply panicked
+ panic(r)
+ }
+ outc <- stmResponse{nil, e.err}
+ }
+ }()
+ var out stmResponse
+ for {
+ s.reset()
+ if out.err = apply(s); out.err != nil {
+ break
+ }
+ if out.resp = s.commit(); out.resp != nil {
+ break
+ }
+ }
+ outc <- out
+ }()
+ r := <-outc
+ return r.resp, r.err
+}
+
+// stm implements repeatable-read software transactional memory over etcd
+type stm struct {
+ client *v3.Client
+ ctx context.Context
+ // rset holds read key values and revisions
+ rset readSet
+ // wset holds overwritten keys and their values
+ wset writeSet
+ // getOpts are the opts used for gets
+ getOpts []v3.OpOption
+ // conflicts computes the current conflicts on the txn
+ conflicts func() []v3.Cmp
+}
+
+type stmPut struct {
+ val string
+ op v3.Op
+}
+
+type readSet map[string]*v3.GetResponse
+
+func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
+ for i, resp := range txnresp.Responses {
+ rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
+ }
+}
+
+// first returns the store revision from the first fetch
+func (rs readSet) first() int64 {
+ ret := int64(math.MaxInt64 - 1)
+ for _, resp := range rs {
+ if rev := resp.Header.Revision; rev < ret {
+ ret = rev
+ }
+ }
+ return ret
+}
+
+// cmps guards the txn from updates to read set
+func (rs readSet) cmps() []v3.Cmp {
+ cmps := make([]v3.Cmp, 0, len(rs))
+ for k, rk := range rs {
+ cmps = append(cmps, isKeyCurrent(k, rk))
+ }
+ return cmps
+}
+
+type writeSet map[string]stmPut
+
+func (ws writeSet) get(keys ...string) *stmPut {
+ for _, key := range keys {
+ if wv, ok := ws[key]; ok {
+ return &wv
+ }
+ }
+ return nil
+}
+
+// cmps returns a cmp list testing no writes have happened past rev
+func (ws writeSet) cmps(rev int64) []v3.Cmp {
+ cmps := make([]v3.Cmp, 0, len(ws))
+ for key := range ws {
+ cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
+ }
+ return cmps
+}
+
+// puts is the list of ops for all pending writes
+func (ws writeSet) puts() []v3.Op {
+ puts := make([]v3.Op, 0, len(ws))
+ for _, v := range ws {
+ puts = append(puts, v.op)
+ }
+ return puts
+}
+
+func (s *stm) Get(keys ...string) string {
+ if wv := s.wset.get(keys...); wv != nil {
+ return wv.val
+ }
+ return respToValue(s.fetch(keys...))
+}
+
+func (s *stm) Put(key, val string, opts ...v3.OpOption) {
+ s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
+}
+
+func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
+
+func (s *stm) Rev(key string) int64 {
+ if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
+ return resp.Kvs[0].ModRevision
+ }
+ return 0
+}
+
+func (s *stm) commit() *v3.TxnResponse {
+ txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ if txnresp.Succeeded {
+ return txnresp
+ }
+ return nil
+}
+
+func (s *stm) fetch(keys ...string) *v3.GetResponse {
+ if len(keys) == 0 {
+ return nil
+ }
+ ops := make([]v3.Op, len(keys))
+ for i, key := range keys {
+ if resp, ok := s.rset[key]; ok {
+ return resp
+ }
+ ops[i] = v3.OpGet(key, s.getOpts...)
+ }
+ txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ s.rset.add(keys, txnresp)
+ return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
+}
+
+func (s *stm) reset() {
+ s.rset = make(map[string]*v3.GetResponse)
+ s.wset = make(map[string]stmPut)
+}
+
+type stmSerializable struct {
+ stm
+ prefetch map[string]*v3.GetResponse
+}
+
+func (s *stmSerializable) Get(keys ...string) string {
+ if wv := s.wset.get(keys...); wv != nil {
+ return wv.val
+ }
+ firstRead := len(s.rset) == 0
+ for _, key := range keys {
+ if resp, ok := s.prefetch[key]; ok {
+ delete(s.prefetch, key)
+ s.rset[key] = resp
+ }
+ }
+ resp := s.stm.fetch(keys...)
+ if firstRead {
+ // txn's base revision is defined by the first read
+ s.getOpts = []v3.OpOption{
+ v3.WithRev(resp.Header.Revision),
+ v3.WithSerializable(),
+ }
+ }
+ return respToValue(resp)
+}
+
+func (s *stmSerializable) Rev(key string) int64 {
+ s.Get(key)
+ return s.stm.Rev(key)
+}
+
+func (s *stmSerializable) gets() ([]string, []v3.Op) {
+ keys := make([]string, 0, len(s.rset))
+ ops := make([]v3.Op, 0, len(s.rset))
+ for k := range s.rset {
+ keys = append(keys, k)
+ ops = append(ops, v3.OpGet(k))
+ }
+ return keys, ops
+}
+
+func (s *stmSerializable) commit() *v3.TxnResponse {
+ keys, getops := s.gets()
+ txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
+ // use Else to prefetch keys in case of conflict to save a round trip
+ txnresp, err := txn.Else(getops...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ if txnresp.Succeeded {
+ return txnresp
+ }
+ // load prefetch with Else data
+ s.rset.add(keys, txnresp)
+ s.prefetch = s.rset
+ s.getOpts = nil
+ return nil
+}
+
+func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
+ if len(r.Kvs) != 0 {
+ return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
+ }
+ return v3.Compare(v3.ModRevision(k), "=", 0)
+}
+
+func respToValue(resp *v3.GetResponse) string {
+ if resp == nil || len(resp.Kvs) == 0 {
+ return ""
+ }
+ return string(resp.Kvs[0].Value)
+}
+
+// NewSTMRepeatable is deprecated.
+func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
+}
+
+// NewSTMSerializable is deprecated.
+func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
+}
+
+// NewSTMReadCommitted is deprecated.
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
+}
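
The concurrency.STM additions above back the new kv locking/transaction path. A minimal usage sketch of NewSTM, assuming a reachable etcd at "localhost:2379"; the account keys and the transfer helper are illustrative only, not part of this change:

    package main

    import (
        "context"
        "fmt"
        "strconv"

        v3 "go.etcd.io/etcd/clientv3"
        "go.etcd.io/etcd/clientv3/concurrency"
    )

    // transfer moves amount between two keys inside one STM transaction.
    // On a write conflict the apply function is re-run automatically by runSTM.
    func transfer(cli *v3.Client, from, to string, amount int) error {
        apply := func(s concurrency.STM) error {
            src, _ := strconv.Atoi(s.Get(from)) // a missing key reads as 0 in this sketch
            dst, _ := strconv.Atoi(s.Get(to))
            s.Put(from, strconv.Itoa(src-amount))
            s.Put(to, strconv.Itoa(dst+amount))
            return nil
        }
        _, err := concurrency.NewSTM(cli, apply,
            concurrency.WithAbortContext(context.Background()),
            concurrency.WithPrefetch(from, to)) // fetch both keys in one round trip
        return err
    }

    func main() {
        cli, err := v3.New(v3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            panic(err)
        }
        defer cli.Close()
        fmt.Println(transfer(cli, "acct-a", "acct-b", 10))
    }

With the default SerializableSnapshot isolation, the commit txn compares the mod revisions of both the read set and the write set, so a concurrent writer cannot interleave between the Get and Put calls without forcing a retry.
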
diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go
index 79d6e2a..96e94e1 100644
--- a/vendor/go.etcd.io/etcd/clientv3/config.go
+++ b/vendor/go.etcd.io/etcd/clientv3/config.go
@@ -19,6 +19,7 @@
"crypto/tls"
"time"
+ "go.uber.org/zap"
"google.golang.org/grpc"
)
@@ -72,4 +73,30 @@
// Context is the default client context; it can be used to cancel grpc dial out and
// other operations that do not have an explicit context.
Context context.Context
+
+ // LogConfig configures client-side logger.
+ // If nil, use the default logger.
+ // TODO: configure gRPC logger
+ LogConfig *zap.Config
+
+	// PermitWithoutStream when set will allow the client to send keepalive pings to the server without any active streams (RPCs).
+ PermitWithoutStream bool `json:"permit-without-stream"`
+}
+
+// DefaultLogConfig is the default client logging configuration.
+// Default log level is "Warn". Use "zap.InfoLevel" for debugging.
+// Use "/dev/null" for output paths, to discard all logs.
+var DefaultLogConfig = zap.Config{
+ Level: zap.NewAtomicLevelAt(zap.WarnLevel),
+ Development: false,
+ Sampling: &zap.SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ Encoding: "json",
+ EncoderConfig: zap.NewProductionEncoderConfig(),
+
+ // Use "/dev/null" to discard all
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
}
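
The new LogConfig field and DefaultLogConfig variable above let callers tune client-side logging. A short sketch of raising the verbosity for debugging, assuming this vendored snapshot is used as shown; the endpoint is a placeholder:

    package main

    import (
        "time"

        "go.etcd.io/etcd/clientv3"
        "go.uber.org/zap"
    )

    func main() {
        // Start from the package default and raise the level for debugging.
        logCfg := clientv3.DefaultLogConfig
        logCfg.Level = zap.NewAtomicLevelAt(zap.InfoLevel)

        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"localhost:2379"},
            DialTimeout: 2 * time.Second,
            LogConfig:   &logCfg, // nil falls back to the default logger
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()
    }
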
diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go
index 717fbe4..01a3f59 100644
--- a/vendor/go.etcd.io/etcd/clientv3/doc.go
+++ b/vendor/go.etcd.io/etcd/clientv3/doc.go
@@ -19,7 +19,7 @@
// // expect dial time-out on ipv4 blackhole
// _, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"http://254.0.0.1:12345"},
-// DialTimeout: 2 * time.Second
+// DialTimeout: 2 * time.Second,
// })
//
// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
@@ -61,7 +61,7 @@
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded.
-// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
+// 3. gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
//
// Here is the example code to handle client errors:
//
@@ -71,14 +71,14 @@
// // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded
+// } else if err == rpctypes.ErrEmptyKey {
+// // client-side error: key is not provided
// } else if ev, ok := status.FromError(err); ok {
// code := ev.Code()
// if code == codes.DeadlineExceeded {
// // server-side context might have timed-out first (due to clock skew)
// // while original client-side context is not timed-out yet
// }
-// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
-// // process (verr.Errors)
// } else {
// // bad cluster endpoints, which are not etcd servers
// }
@@ -87,11 +87,20 @@
// go func() { cli.Close() }()
// _, err := kvc.Get(ctx, "a")
// if err != nil {
+// // with etcd clientv3 <= v3.3
// if err == context.Canceled {
// // grpc balancer calls 'Get' with an inflight client.Close
// } else if err == grpc.ErrClientConnClosing {
// // grpc balancer calls 'Get' after client.Close.
// }
+// // with etcd clientv3 >= v3.4
+// if clientv3.IsConnCanceled(err) {
+// // gRPC client connection is closed
+// }
// }
//
+// The grpc load balancer is registered statically and is shared across etcd clients.
+// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
+// variable. E.g. "ETCD_CLIENT_DEBUG=1".
+//
package clientv3
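
The package comment above keeps its examples in comment form; a compilable version of the newer error check it describes, assuming this vendored revision already exports clientv3.IsConnCanceled as the comment suggests (endpoint and key are placeholders):

    package main

    import (
        "context"
        "fmt"

        "go.etcd.io/etcd/clientv3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            panic(err)
        }
        // Close the client concurrently with an in-flight request, as in the
        // doc comment above, then classify the resulting error.
        go func() { cli.Close() }()
        if _, err = cli.Get(context.Background(), "a"); err != nil {
            if clientv3.IsConnCanceled(err) {
                fmt.Println("gRPC client connection is closed")
            } else {
                fmt.Println("other error:", err)
            }
        }
    }
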
diff --git a/vendor/go.etcd.io/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go
index 5a7469b..2b7864a 100644
--- a/vendor/go.etcd.io/etcd/clientv3/kv.go
+++ b/vendor/go.etcd.io/etcd/clientv3/kv.go
@@ -17,7 +17,7 @@
import (
"context"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go
index 3729cf3..c2796fc 100644
--- a/vendor/go.etcd.io/etcd/clientv3/lease.go
+++ b/vendor/go.etcd.io/etcd/clientv3/lease.go
@@ -19,9 +19,10 @@
"sync"
"time"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
@@ -117,22 +118,21 @@
// Leases retrieves all leases.
Leases(ctx context.Context) (*LeaseLeasesResponse, error)
- // KeepAlive keeps the given lease alive forever. If the keepalive response
- // posted to the channel is not consumed immediately, the lease client will
- // continue sending keep alive requests to the etcd server at least every
- // second until latest response is consumed.
+ // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
+	// to the channel are not consumed promptly, the channel may become full. When full, the lease
+ // client will continue sending keep alive requests to the etcd server, but will drop responses
+ // until there is capacity on the channel to send more responses.
+ //
+	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or is
+	// canceled by the caller (e.g. context.Canceled), KeepAlive returns an ErrKeepAliveHalted error
+	// containing the error reason.
//
// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
// alive stream is interrupted in some way the client cannot handle itself;
- // given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
- // from this closed channel is nil.
- //
- // If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
- // no leader") or canceled by the caller (e.g. context.Canceled), the error
- // is returned. Otherwise, it retries.
+ // given context "ctx" is canceled or timed out.
//
// TODO(v4.0): post errors to last keep alive message before closing
- // (see https://github.com/coreos/etcd/pull/7866)
+ // (see https://github.com/etcd-io/etcd/pull/7866)
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. The response corresponds to the
@@ -172,6 +172,8 @@
firstKeepAliveOnce sync.Once
callOpts []grpc.CallOption
+
+ lg *zap.Logger
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
@@ -196,6 +198,7 @@
keepAlives: make(map[LeaseID]*keepAlive),
remote: remote,
firstKeepAliveTimeout: keepAliveTimeout,
+ lg: c.lg,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
@@ -291,7 +294,7 @@
}
l.mu.Unlock()
- go l.keepAliveCtxCloser(id, ctx, ka.donec)
+ go l.keepAliveCtxCloser(ctx, id, ka.donec)
l.firstKeepAliveOnce.Do(func() {
go l.recvKeepAliveLoop()
go l.deadlineLoop()
@@ -323,7 +326,7 @@
return nil
}
-func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
+func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
select {
case <-donec:
return
@@ -459,7 +462,6 @@
select {
case <-time.After(retryConnWait):
- continue
case <-l.stopCtx.Done():
return l.stopCtx.Err()
}
@@ -469,7 +471,7 @@
// resetRecv opens a new lease stream and starts sending keep alive requests.
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
sctx, cancel := context.WithCancel(l.stopCtx)
- stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
+ stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
if err != nil {
cancel()
return nil, err
@@ -518,6 +520,12 @@
select {
case ch <- karesp:
default:
+ if l.lg != nil {
+ l.lg.Warn("lease keepalive response queue is full; dropping response send",
+ zap.Int("queue-size", len(ch)),
+ zap.Int("queue-capacity", cap(ch)),
+ )
+ }
}
// still advance in order to rate-limit keep-alive sends
ka.nextKeepAlive = nextKeepAlive
@@ -569,7 +577,7 @@
}
select {
- case <-time.After(500 * time.Millisecond):
+ case <-time.After(retryConnWait):
case <-stream.Context().Done():
return
case <-l.donec:
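
Given the reworded KeepAlive contract above (responses are dropped, with the new warning log, when the channel is full), a minimal consumer that drains the channel promptly; the 10-second TTL and the endpoint are illustrative:

    package main

    import (
        "context"
        "fmt"

        "go.etcd.io/etcd/clientv3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        lease, err := cli.Grant(context.Background(), 10) // 10s TTL
        if err != nil {
            panic(err)
        }
        ch, err := cli.KeepAlive(context.Background(), lease.ID)
        if err != nil {
            panic(err)
        }
        // Drain the channel promptly so the lessor never has to drop
        // keepalive responses; a closed channel means the stream ended.
        for resp := range ch {
            if resp == nil {
                break
            }
            fmt.Println("lease renewed, TTL:", resp.TTL)
        }
    }
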
diff --git a/vendor/go.etcd.io/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go
index 782e313..f5ae010 100644
--- a/vendor/go.etcd.io/etcd/clientv3/logger.go
+++ b/vendor/go.etcd.io/etcd/clientv3/logger.go
@@ -18,28 +18,14 @@
"io/ioutil"
"sync"
+ "go.etcd.io/etcd/pkg/logutil"
+
"google.golang.org/grpc/grpclog"
)
-// Logger is the logger used by client library.
-// It implements grpclog.LoggerV2 interface.
-type Logger interface {
- grpclog.LoggerV2
-
- // Lvl returns logger if logger's verbosity level >= "lvl".
- // Otherwise, logger that discards all logs.
- Lvl(lvl int) Logger
-
- // to satisfy capnslog
-
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
-}
-
var (
- loggerMu sync.RWMutex
- logger Logger
+ lgMu sync.RWMutex
+ lg logutil.Logger
)
type settableLogger struct {
@@ -49,29 +35,29 @@
func init() {
// disable client side logs by default
- logger = &settableLogger{}
+ lg = &settableLogger{}
SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}
// SetLogger sets client-side Logger.
func SetLogger(l grpclog.LoggerV2) {
- loggerMu.Lock()
- logger = NewLogger(l)
+ lgMu.Lock()
+ lg = logutil.NewLogger(l)
// override grpclog so that any changes happen with locking
- grpclog.SetLoggerV2(logger)
- loggerMu.Unlock()
+ grpclog.SetLoggerV2(lg)
+ lgMu.Unlock()
}
-// GetLogger returns the current logger.
-func GetLogger() Logger {
- loggerMu.RLock()
- l := logger
- loggerMu.RUnlock()
+// GetLogger returns the current logutil.Logger.
+func GetLogger() logutil.Logger {
+ lgMu.RLock()
+ l := lg
+ lgMu.RUnlock()
return l
}
-// NewLogger returns a new Logger with grpclog.LoggerV2.
-func NewLogger(gl grpclog.LoggerV2) Logger {
+// NewLogger returns a new Logger with logutil.Logger.
+func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
return &settableLogger{l: gl}
}
@@ -104,32 +90,12 @@
func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
func (s *settableLogger) V(l int) bool { return s.get().V(l) }
-func (s *settableLogger) Lvl(lvl int) Logger {
+func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
s.mu.RLock()
l := s.l
s.mu.RUnlock()
if l.V(lvl) {
return s
}
- return &noLogger{}
+ return logutil.NewDiscardLogger()
}
-
-type noLogger struct{}
-
-func (*noLogger) Info(args ...interface{}) {}
-func (*noLogger) Infof(format string, args ...interface{}) {}
-func (*noLogger) Infoln(args ...interface{}) {}
-func (*noLogger) Warning(args ...interface{}) {}
-func (*noLogger) Warningf(format string, args ...interface{}) {}
-func (*noLogger) Warningln(args ...interface{}) {}
-func (*noLogger) Error(args ...interface{}) {}
-func (*noLogger) Errorf(format string, args ...interface{}) {}
-func (*noLogger) Errorln(args ...interface{}) {}
-func (*noLogger) Fatal(args ...interface{}) {}
-func (*noLogger) Fatalf(format string, args ...interface{}) {}
-func (*noLogger) Fatalln(args ...interface{}) {}
-func (*noLogger) Print(args ...interface{}) {}
-func (*noLogger) Printf(format string, args ...interface{}) {}
-func (*noLogger) Println(args ...interface{}) {}
-func (*noLogger) V(l int) bool { return false }
-func (ng *noLogger) Lvl(lvl int) Logger { return ng }
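
Client logs are still discarded by default (see the init above); a short sketch of re-enabling them via SetLogger, which now wraps the supplied grpclog.LoggerV2 in a logutil.Logger instead of the removed clientv3.Logger interface. The output destinations are illustrative:

    package main

    import (
        "os"

        "go.etcd.io/etcd/clientv3"
        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Route info/warning/error output to stderr instead of discarding it.
        clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
    }
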
diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
index f60cfbe..744455a 100644
--- a/vendor/go.etcd.io/etcd/clientv3/maintenance.go
+++ b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
@@ -16,9 +16,10 @@
import (
"context"
+ "fmt"
"io"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
@@ -57,6 +58,8 @@
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
// Snapshot provides a reader for a point-in-time snapshot of etcd.
+ // If the context "ctx" is canceled or timed out, reading from returned
+ // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
Snapshot(ctx context.Context) (io.ReadCloser, error)
// MoveLeader requests current leader to transfer its leadership to the transferee.
@@ -73,9 +76,9 @@
func NewMaintenance(c *Client) Maintenance {
api := &maintenance{
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
- conn, err := c.dial(endpoint)
+ conn, err := c.Dial(endpoint)
if err != nil {
- return nil, nil, err
+ return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
}
cancel := func() { conn.Close() }
return RetryMaintenanceClient(c, conn), cancel, nil
@@ -173,6 +176,7 @@
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
+
return nil, toErr(ctx, err)
}
defer cancel()
@@ -184,7 +188,7 @@
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
- ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...)
+ ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
if err != nil {
return nil, toErr(ctx, err)
}
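
To illustrate the Snapshot contract documented above (reads from the returned io.ReadCloser fail once "ctx" is canceled or times out), a minimal save-to-file sketch; the timeout, endpoint, and file path are placeholders:

    package main

    import (
        "context"
        "io"
        "os"
        "time"

        "go.etcd.io/etcd/clientv3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        rc, err := cli.Snapshot(ctx)
        if err != nil {
            panic(err)
        }
        defer rc.Close()

        f, err := os.Create("/tmp/etcd.snapshot")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // If ctx is canceled or times out mid-copy, io.Copy returns the
        // context error, per the doc comment added above.
        if _, err := io.Copy(f, rc); err != nil {
            panic(err)
        }
    }
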
diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go
index c6ec5bf..13507c9 100644
--- a/vendor/go.etcd.io/etcd/clientv3/op.go
+++ b/vendor/go.etcd.io/etcd/clientv3/op.go
@@ -14,7 +14,7 @@
package clientv3
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+import pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
type opType int
@@ -26,9 +26,7 @@
tTxn
)
-var (
- noPrefixEnd = []byte{0}
-)
+var noPrefixEnd = []byte{0}
// Op represents an Operation that kv can execute.
type Op struct {
@@ -53,6 +51,12 @@
// for watch, put, delete
prevKV bool
+ // for watch
+ // fragmentation should be disabled by default
+ // if true, split watch events when total exceeds
+ // "--max-request-bytes" flag value + 512-byte
+ fragment bool
+
// for put
ignoreValue bool
ignoreLease bool
@@ -77,8 +81,15 @@
// accessors / mutators
-func (op Op) IsTxn() bool { return op.t == tTxn }
-func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
+// IsTxn returns true if the "Op" type is transaction.
+func (op Op) IsTxn() bool {
+ return op.t == tTxn
+}
+
+// Txn returns the comparison(if) operations, "then" operations, and "else" operations.
+func (op Op) Txn() ([]Cmp, []Op, []Op) {
+ return op.cmps, op.thenOps, op.elseOps
+}
// KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key }
@@ -205,12 +216,14 @@
return op.t != tRange
}
+// OpGet returns "get" operation based on given key and operation options.
func OpGet(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
return ret
}
+// OpDelete returns "delete" operation based on given key and operation options.
func OpDelete(key string, opts ...OpOption) Op {
ret := Op{t: tDeleteRange, key: []byte(key)}
ret.applyOpts(opts)
@@ -239,6 +252,7 @@
return ret
}
+// OpPut returns "put" operation based on given key-value and operation options.
func OpPut(key, val string, opts ...OpOption) Op {
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
ret.applyOpts(opts)
@@ -267,6 +281,7 @@
return ret
}
+// OpTxn returns "txn" operation based on given transaction conditions.
func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
}
@@ -377,7 +392,14 @@
// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
// to be equal or greater than the key in the argument.
-func WithFromKey() OpOption { return WithRange("\x00") }
+func WithFromKey() OpOption {
+ return func(op *Op) {
+ if len(op.key) == 0 {
+ op.key = []byte{0}
+ }
+ op.end = []byte("\x00")
+ }
+}
// WithSerializable makes 'Get' request serializable. By default,
// it's linearizable. Serializable requests are better for lower latency
@@ -466,6 +488,17 @@
}
}
+// WithFragment to receive raw watch response with fragmentation.
+// Fragmentation is disabled by default. If fragmentation is enabled,
+// etcd watch server will split watch response before sending to clients
+// when the total size of watch events exceed server-side request limit.
+// The default server-side request limit is 1.5 MiB, which can be configured
+// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
+// See "etcdserver/api/v3rpc/watch.go" for more details.
+func WithFragment() OpOption {
+ return func(op *Op) { op.fragment = true }
+}
+
// WithIgnoreValue updates the key using its current value.
// This option can not be combined with non-empty values.
// Returns an error if the key does not exist.
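
From the caller's side, the two option changes above look like this: WithFromKey now also substitutes "\x00" for an empty key, and WithFragment opts a watch into fragmented responses. A sketch with illustrative key names and endpoint:

    package main

    import (
        "context"
        "fmt"

        "go.etcd.io/etcd/clientv3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // Range over every key: with the fix above an empty key argument is
        // rewritten to "\x00" instead of producing an invalid range.
        resp, err := cli.Get(context.Background(), "", clientv3.WithFromKey())
        if err != nil {
            panic(err)
        }
        fmt.Println("keys:", len(resp.Kvs))

        // Watch a large prefix and allow the server to split oversized
        // watch responses instead of rejecting them.
        wch := cli.Watch(context.Background(), "config/",
            clientv3.WithPrefix(), clientv3.WithFragment())
        for wresp := range wch {
            for _, ev := range wresp.Events {
                fmt.Printf("%s %q\n", ev.Type, ev.Kv.Key)
            }
        }
    }
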
diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go
index fa25811..4660ace 100644
--- a/vendor/go.etcd.io/etcd/clientv3/options.go
+++ b/vendor/go.etcd.io/etcd/clientv3/options.go
@@ -16,17 +16,17 @@
import (
"math"
+ "time"
"google.golang.org/grpc"
)
var (
- // Disable gRPC internal retrial logic
- // TODO: enable when gRPC retry is stable (FailFast=false)
- // Reference:
- // - https://github.com/grpc/grpc-go/issues/1532
- // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md
- defaultFailFast = grpc.FailFast(true)
+	// client-side retry handling for request failures where the data was not written to the wire or
+	// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
+	// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
+	// transient failures.
+ defaultFailFast = grpc.FailFast(false)
// client-side request send limit, gRPC default is math.MaxInt32
// Make sure that "client-side send limit < server-side default send/recv limit"
@@ -38,6 +38,22 @@
// because range response can easily exceed request send limits
// Default to math.MaxInt32; writes exceeding server-side send limit fails anyway
defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+
+	// client-side non-streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+ // If set to 0, retry is disabled.
+ defaultUnaryMaxRetries uint = 100
+
+	// client-side streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+ // If set to 0, retry is disabled.
+ defaultStreamMaxRetries = uint(^uint(0)) // max uint
+
+ // client-side retry backoff wait between requests.
+ defaultBackoffWaitBetween = 25 * time.Millisecond
+
+ // client-side retry backoff default jitter fraction.
+ defaultBackoffJitterFraction = 0.10
)
// defaultCallOpts defines a list of default "gRPC.CallOption".
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go
index 7f89ba6..38ad00a 100644
--- a/vendor/go.etcd.io/etcd/clientv3/retry.go
+++ b/vendor/go.etcd.io/etcd/clientv3/retry.go
@@ -17,8 +17,8 @@
import (
"context"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -32,465 +32,267 @@
nonRepeatable
)
+func (rp retryPolicy) String() string {
+ switch rp {
+ case repeatable:
+ return "repeatable"
+ case nonRepeatable:
+ return "nonRepeatable"
+ default:
+ return "UNKNOWN"
+ }
+}
+
type rpcFunc func(ctx context.Context) error
type retryRPCFunc func(context.Context, rpcFunc, retryPolicy) error
type retryStopErrFunc func(error) bool
+// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
+//
// immutable requests (e.g. Get) should be retried unless it's
// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
//
-// "isRepeatableStopError" returns "true" when an immutable request
-// is interrupted by server-side or gRPC-side error and its status
-// code is not transient (!= codes.Unavailable).
-//
-// Returning "true" means retry should stop, since client cannot
+// Returning "false" means retry should stop, since client cannot
// handle itself even with retries.
-func isRepeatableStopError(err error) bool {
+func isSafeRetryImmutableRPC(err error) bool {
eErr := rpctypes.Error(err)
- // always stop retry on etcd errors
if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
- return true
+ // interrupted by non-transient server-side or gRPC-side error
+ // client cannot handle itself (e.g. rpctypes.ErrCompacted)
+ return false
}
// only retry if unavailable
- ev, _ := status.FromError(err)
- return ev.Code() != codes.Unavailable
+ ev, ok := status.FromError(err)
+ if !ok {
+		// all errors from RPCs are typed "grpc/status.(*statusError)"
+ // (ref. https://github.com/grpc/grpc-go/pull/1782)
+ //
+ // if the error type is not "grpc/status.(*statusError)",
+ // it could be from "Dial"
+ // TODO: do not retry for now
+ // ref. https://github.com/grpc/grpc-go/issues/1581
+ return false
+ }
+ return ev.Code() == codes.Unavailable
}
+// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
+//
// mutable requests (e.g. Put, Delete, Txn) should only be retried
// when the status code is codes.Unavailable when initial connection
-// has not been established (no pinned endpoint).
+// has not been established (no endpoint is up).
//
-// "isNonRepeatableStopError" returns "true" when a mutable request
-// is interrupted by non-transient error that client cannot handle itself,
-// or transient error while the connection has already been established
-// (pinned endpoint exists).
-//
-// Returning "true" means retry should stop, otherwise it violates
+// Returning "false" means retry should stop, otherwise it violates
// write-at-most-once semantics.
-func isNonRepeatableStopError(err error) bool {
- ev, _ := status.FromError(err)
- if ev.Code() != codes.Unavailable {
- return true
+func isSafeRetryMutableRPC(err error) bool {
+ if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+ // not safe for mutable RPCs
+ // e.g. interrupted by non-transient error that client cannot handle itself,
+ // or transient error while the connection has already been established
+ return false
}
desc := rpctypes.ErrorDesc(err)
- return desc != "there is no address available" && desc != "there is no connection available"
-}
-
-func (c *Client) newRetryWrapper() retryRPCFunc {
- return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
- var isStop retryStopErrFunc
- switch rp {
- case repeatable:
- isStop = isRepeatableStopError
- case nonRepeatable:
- isStop = isNonRepeatableStopError
- }
- for {
- if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil {
- return err
- }
- pinned := c.balancer.pinned()
- err := f(rpcCtx)
- if err == nil {
- return nil
- }
- logger.Lvl(4).Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned)
-
- if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) {
- // mark this before endpoint switch is triggered
- c.balancer.hostPortError(pinned, err)
- c.balancer.next()
- logger.Lvl(4).Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error())
- }
-
- if isStop(err) {
- return err
- }
- }
- }
-}
-
-func (c *Client) newAuthRetryWrapper(retryf retryRPCFunc) retryRPCFunc {
- return func(rpcCtx context.Context, f rpcFunc, rp retryPolicy) error {
- for {
- pinned := c.balancer.pinned()
- err := retryf(rpcCtx, f, rp)
- if err == nil {
- return nil
- }
- logger.Lvl(4).Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned)
- // always stop retry on etcd errors other than invalid auth token
- if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
- gterr := c.getToken(rpcCtx)
- if gterr != nil {
- logger.Lvl(4).Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned)
- return err // return the original error for simplicity
- }
- continue
- }
- return err
- }
- }
+ return desc == "there is no address available" || desc == "there is no connection available"
}
type retryKVClient struct {
- kc pb.KVClient
- retryf retryRPCFunc
+ kc pb.KVClient
}
// RetryKVClient implements a KVClient.
func RetryKVClient(c *Client) pb.KVClient {
return &retryKVClient{
- kc: pb.NewKVClient(c.conn),
- retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
+ kc: pb.NewKVClient(c.conn),
}
}
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Range(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Put(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rkv.kc.Put(ctx, in, opts...)
}
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.DeleteRange(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rkv.kc.DeleteRange(ctx, in, opts...)
}
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
- // TODO: "repeatable" for read-only txn
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Txn(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rkv.kc.Txn(ctx, in, opts...)
}
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
- err = rkv.retryf(ctx, func(rctx context.Context) error {
- resp, err = rkv.kc.Compact(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rkv.kc.Compact(ctx, in, opts...)
}
type retryLeaseClient struct {
- lc pb.LeaseClient
- retryf retryRPCFunc
+ lc pb.LeaseClient
}
// RetryLeaseClient implements a LeaseClient.
func RetryLeaseClient(c *Client) pb.LeaseClient {
return &retryLeaseClient{
- lc: pb.NewLeaseClient(c.conn),
- retryf: c.newAuthRetryWrapper(c.newRetryWrapper()),
+ lc: pb.NewLeaseClient(c.conn),
}
}
func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseLeases(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseGrant(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
-
+ return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
- err = rlc.retryf(ctx, func(rctx context.Context) error {
- stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...)
- return err
- }, repeatable)
- return stream, err
+ return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
}
type retryClusterClient struct {
- cc pb.ClusterClient
- retryf retryRPCFunc
+ cc pb.ClusterClient
}
// RetryClusterClient implements a ClusterClient.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{
- cc: pb.NewClusterClient(c.conn),
- retryf: c.newRetryWrapper(),
+ cc: pb.NewClusterClient(c.conn),
}
}
func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberList(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberAdd(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rcc.cc.MemberAdd(ctx, in, opts...)
}
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberRemove(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rcc.cc.MemberRemove(ctx, in, opts...)
}
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
- err = rcc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rcc.cc.MemberUpdate(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rcc.cc.MemberUpdate(ctx, in, opts...)
}
type retryMaintenanceClient struct {
- mc pb.MaintenanceClient
- retryf retryRPCFunc
+ mc pb.MaintenanceClient
}
// RetryMaintenanceClient implements a Maintenance.
func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
return &retryMaintenanceClient{
- mc: pb.NewMaintenanceClient(conn),
- retryf: c.newRetryWrapper(),
+ mc: pb.NewMaintenanceClient(conn),
}
}
func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Alarm(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Status(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Hash(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.HashKV(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- stream, err = rmc.mc.Snapshot(rctx, in, opts...)
- return err
- }, repeatable)
- return stream, err
+ return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.MoveLeader(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
- err = rmc.retryf(ctx, func(rctx context.Context) error {
- resp, err = rmc.mc.Defragment(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rmc.mc.Defragment(ctx, in, opts...)
}
type retryAuthClient struct {
- ac pb.AuthClient
- retryf retryRPCFunc
+ ac pb.AuthClient
}
// RetryAuthClient implements a AuthClient.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{
- ac: pb.NewAuthClient(c.conn),
- retryf: c.newRetryWrapper(),
+ ac: pb.NewAuthClient(c.conn),
}
}
func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserList(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserGet(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleGet(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleList(rctx, in, opts...)
- return err
- }, repeatable)
- return resp, err
+ return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.AuthEnable(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.AuthEnable(ctx, in, opts...)
}
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.AuthDisable(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.AuthDisable(ctx, in, opts...)
}
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserAdd(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.UserAdd(ctx, in, opts...)
}
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserDelete(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.UserDelete(ctx, in, opts...)
}
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserChangePassword(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.UserChangePassword(ctx, in, opts...)
}
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserGrantRole(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.UserGrantRole(ctx, in, opts...)
}
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.UserRevokeRole(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.UserRevokeRole(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleAdd(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.RoleAdd(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleDelete(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.RoleDelete(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.RoleGrantPermission(ctx, in, opts...)
}
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.RoleRevokePermission(ctx, in, opts...)
}
func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
- err = rac.retryf(ctx, func(rctx context.Context) error {
- resp, err = rac.ac.Authenticate(rctx, in, opts...)
- return err
- }, nonRepeatable)
- return resp, err
+ return rac.ac.Authenticate(ctx, in, opts...)
}
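
The net effect of the retry.go rewrite, seen from a caller: read-only RPCs carry withRetryPolicy(repeatable) and are retried by the new interceptor on transient codes.Unavailable errors, while mutable RPCs keep write-at-most-once semantics and are only retried when the failure clearly happened before the request reached a server. A brief sketch (endpoint and keys are placeholders):

    package main

    import (
        "context"

        "go.etcd.io/etcd/clientv3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ctx := context.Background()

        // Range goes through retryKVClient.Range, which appends
        // withRetryPolicy(repeatable): transient codes.Unavailable errors
        // are retried by the unary interceptor.
        _, _ = cli.Get(ctx, "foo")

        // Put is left untagged, so the interceptor only retries it when the
        // failure clearly happened before the request reached a server.
        _, _ = cli.Put(ctx, "foo", "bar")
    }
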
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
new file mode 100644
index 0000000..6b1054e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
@@ -0,0 +1,395 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
+// fine grained error checking required by write-at-most-once retry semantics of etcd.
+
+package clientv3
+
+import (
+ "context"
+ "io"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// unaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
+ intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ grpcOpts, retryOpts := filterCallOptions(opts)
+ callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+ // short circuit for simplicity, and avoiding allocations.
+ if callOpts.max == 0 {
+ return invoker(ctx, method, req, reply, cc, grpcOpts...)
+ }
+ var lastErr error
+ for attempt := uint(0); attempt < callOpts.max; attempt++ {
+ if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
+ return err
+ }
+ logger.Info(
+ "retrying of unary invoker",
+ zap.String("target", cc.Target()),
+ zap.Uint("attempt", attempt),
+ )
+ lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
+ if lastErr == nil {
+ return nil
+ }
+ logger.Warn(
+ "retrying of unary invoker failed",
+ zap.String("target", cc.Target()),
+ zap.Uint("attempt", attempt),
+ zap.Error(lastErr),
+ )
+ if isContextError(lastErr) {
+ if ctx.Err() != nil {
+				// it's the context deadline or cancellation.
+ return lastErr
+ }
+			// it's the callCtx deadline or cancellation, in which case try again.
+ continue
+ }
+ if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
+ gterr := c.getToken(ctx)
+ if gterr != nil {
+ logger.Warn(
+ "retrying of unary invoker failed to fetch new auth token",
+ zap.String("target", cc.Target()),
+ zap.Error(gterr),
+ )
+ return lastErr // return the original error for simplicity
+ }
+ continue
+ }
+ if !isSafeRetry(c.lg, lastErr, callOpts) {
+ return lastErr
+ }
+ }
+ return lastErr
+ }
+}
+
+// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+//
+// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
+// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
+// BidiStreams), the retry interceptor will fail the call.
+func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
+ intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ grpcOpts, retryOpts := filterCallOptions(opts)
+ callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+ // short circuit for simplicity, and avoiding allocations.
+ if callOpts.max == 0 {
+ return streamer(ctx, desc, cc, method, grpcOpts...)
+ }
+ if desc.ClientStreams {
+ return nil, grpc.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
+ }
+ newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
+ logger.Info("retry stream intercept", zap.Error(err))
+ if err != nil {
+ // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+ return nil, err
+ }
+ retryingStreamer := &serverStreamingRetryingStream{
+ client: c,
+ ClientStream: newStreamer,
+ callOpts: callOpts,
+ ctx: ctx,
+ streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
+ return streamer(ctx, desc, cc, method, grpcOpts...)
+ },
+ }
+ return retryingStreamer, nil
+ }
+}
+
+// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+ grpc.ClientStream
+ client *Client
+	bufferedSends []interface{} // single message that the client can send
+ receivedGood bool // indicates whether any prior receives were successful
+	wasClosedSend bool // indicates that CloseSend was called
+ ctx context.Context
+ callOpts *options
+ streamerCall func(ctx context.Context) (grpc.ClientStream, error)
+ mu sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+ s.mu.Lock()
+ s.ClientStream = clientStream
+ s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+ s.mu.Lock()
+ s.bufferedSends = append(s.bufferedSends, m)
+ s.mu.Unlock()
+ return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+ s.mu.Lock()
+ s.wasClosedSend = true
+ s.mu.Unlock()
+ return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+ return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+ return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+ attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+ if !attemptRetry {
+ return lastErr // success or hard failure
+ }
+ // We start off from attempt 1, because zeroth was already made on normal SendMsg().
+ for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+ if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
+ return err
+ }
+ newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
+ if err != nil {
+ // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+ return err
+ }
+ s.setStream(newStream)
+ attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+ //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
+ if !attemptRetry {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
+ s.mu.RLock()
+ wasGood := s.receivedGood
+ s.mu.RUnlock()
+ err := s.getStream().RecvMsg(m)
+ if err == nil || err == io.EOF {
+ s.mu.Lock()
+ s.receivedGood = true
+ s.mu.Unlock()
+ return false, err
+ } else if wasGood {
+ // previous RecvMsg in the stream succeeded, no retry logic should interfere
+ return false, err
+ }
+ if isContextError(err) {
+ if s.ctx.Err() != nil {
+ return false, err
+ }
+		// it's the callCtx deadline or cancellation, in which case try again.
+ return true, err
+ }
+ if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
+ gterr := s.client.getToken(s.ctx)
+ if gterr != nil {
+ s.client.lg.Info("retry failed to fetch new auth token", zap.Error(gterr))
+ return false, err // return the original error for simplicity
+ }
+ return true, err
+
+ }
+ return isSafeRetry(s.client.lg, err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
+ s.mu.RLock()
+ bufferedSends := s.bufferedSends
+ s.mu.RUnlock()
+ newStream, err := s.streamerCall(callCtx)
+ if err != nil {
+ return nil, err
+ }
+ for _, msg := range bufferedSends {
+ if err := newStream.SendMsg(msg); err != nil {
+ return nil, err
+ }
+ }
+ if err := newStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return newStream, nil
+}
+
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+ waitTime := time.Duration(0)
+ if attempt > 0 {
+ waitTime = callOpts.backoffFunc(attempt)
+ }
+ if waitTime > 0 {
+ timer := time.NewTimer(waitTime)
+ select {
+ case <-ctx.Done():
+ timer.Stop()
+ return contextErrToGrpcErr(ctx.Err())
+ case <-timer.C:
+ }
+ }
+ return nil
+}
+
+// isSafeRetry returns true if the request is safe to retry with the given error.
+func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
+ if isContextError(err) {
+ return false
+ }
+ switch callOpts.retryPolicy {
+ case repeatable:
+ return isSafeRetryImmutableRPC(err)
+ case nonRepeatable:
+ return isSafeRetryMutableRPC(err)
+ default:
+ lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+ return false
+ }
+}
+
+func isContextError(err error) bool {
+ return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
+}
+
+func contextErrToGrpcErr(err error) error {
+ switch err {
+ case context.DeadlineExceeded:
+ return grpc.Errorf(codes.DeadlineExceeded, err.Error())
+ case context.Canceled:
+ return grpc.Errorf(codes.Canceled, err.Error())
+ default:
+ return grpc.Errorf(codes.Unknown, err.Error())
+ }
+}
+
+var (
+ defaultOptions = &options{
+ retryPolicy: nonRepeatable,
+ max: 0, // disable
+ backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+ retryAuth: true,
+ }
+)
+
+// backoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt, and should return the time the system client should
+// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request,
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type backoffFunc func(attempt uint) time.Duration
+
+// withRetryPolicy sets the retry policy of this call.
+func withRetryPolicy(rp retryPolicy) retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.retryPolicy = rp
+ }}
+}
+
+// withAuthRetry sets whether authentication retries are enabled.
+func withAuthRetry(retryAuth bool) retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.retryAuth = retryAuth
+ }}
+}
+
+// withMax sets the maximum number of retries on this call, or this interceptor.
+func withMax(maxRetries uint) retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.max = maxRetries
+ }}
+}
+
+// withBackoff sets the backoffFunc used to control the time between retries.
+func withBackoff(bf backoffFunc) retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.backoffFunc = bf
+ }}
+}
+
+type options struct {
+ retryPolicy retryPolicy
+ max uint
+ backoffFunc backoffFunc
+ retryAuth bool
+}
+
+// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
+type retryOption struct {
+	grpc.EmptyCallOption // make sure we implement the private after() and before() methods so we don't panic.
+ applyFunc func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
+ if len(retryOptions) == 0 {
+ return opt
+ }
+ optCopy := &options{}
+ *optCopy = *opt
+ for _, f := range retryOptions {
+ f.applyFunc(optCopy)
+ }
+ return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
+ for _, opt := range callOptions {
+ if co, ok := opt.(retryOption); ok {
+ retryOptions = append(retryOptions, co)
+ } else {
+ grpcOptions = append(grpcOptions, opt)
+ }
+ }
+ return grpcOptions, retryOptions
+}
+
+// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+//
+// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+ return func(attempt uint) time.Duration {
+ return jitterUp(waitBetween, jitterFraction)
+ }
+}
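For orientation, the retry options introduced in this file compose as ordinary functional options. The sketch below shows how an interceptor in this package could resolve its effective options; filterCallOptions, reuseOrNewWithCallOptions, defaultOptions, withMax, withBackoff, and backoffLinearWithJitter come from the code above, while resolveRetryOptions and exampleOpts are hypothetical names used only for illustration.

// resolveRetryOptions is an illustrative helper (not part of the vendored file):
// it splits retryOption values out of the grpc.CallOption list and overlays them
// on the package defaults, the same way the retry interceptors above do.
func resolveRetryOptions(callOpts ...grpc.CallOption) *options {
	_, retryOpts := filterCallOptions(callOpts)
	return reuseOrNewWithCallOptions(defaultOptions, retryOpts)
}

// Example: allow up to 5 retries with ~100ms linear backoff and 10% jitter.
var exampleOpts = []grpc.CallOption{
	withMax(5),
	withBackoff(backoffLinearWithJitter(100*time.Millisecond, 0.10)),
}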
diff --git a/vendor/go.etcd.io/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go
index c3c2d24..c19715d 100644
--- a/vendor/go.etcd.io/etcd/clientv3/txn.go
+++ b/vendor/go.etcd.io/etcd/clientv3/txn.go
@@ -18,7 +18,7 @@
"context"
"sync"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"google.golang.org/grpc"
)
diff --git a/vendor/go.etcd.io/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go
new file mode 100644
index 0000000..8502758
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/utils.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "math/rand"
+ "time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+ multiplier := jitter * (rand.Float64()*2 - 1)
+ return time.Duration(float64(duration) * (1 + multiplier))
+}
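As a quick sanity check on the jitter range documented above, the following sketch (a hypothetical helper, not part of the vendored file) computes the bounds jitterUp can produce; for a 10s duration and jitter 0.1 the result stays within [9s, 11s].

// jitterBounds returns the minimum and maximum durations jitterUp can produce.
// Since the multiplier ranges over [-jitter, +jitter], the scaled duration
// ranges over [d*(1-jitter), d*(1+jitter)].
func jitterBounds(d time.Duration, jitter float64) (min, max time.Duration) {
	min = time.Duration(float64(d) * (1 - jitter))
	max = time.Duration(float64(d) * (1 + jitter))
	return min, max
}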
diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go
index d763385..8ec58bb 100644
--- a/vendor/go.etcd.io/etcd/clientv3/watch.go
+++ b/vendor/go.etcd.io/etcd/clientv3/watch.go
@@ -16,13 +16,14 @@
import (
"context"
+ "errors"
"fmt"
"sync"
"time"
- v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
+ v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -46,8 +47,33 @@
// through the returned channel. If revisions waiting to be sent over the
// watch are compacted, then the watch will be canceled by the server, the
// client will post a compacted error watch response, and the channel will close.
+ // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
+ // and "WatchResponse" from this closed channel has zero events and nil "Err()".
+ // The context "ctx" MUST be canceled as soon as the watcher is no longer being used,
+ // in order to release the associated resources.
+ //
+ // If the context is "context.Background/TODO", the returned "WatchChan" will
+ // not be closed and will block until an event is triggered, except when the server
+ // returns a non-recoverable error (e.g. ErrCompacted).
+ // For example, when context passed with "WithRequireLeader" and the
+ // connected server has no leader (e.g. due to network partition),
+ // error "etcdserver: no leader" (ErrNoLeader) will be returned,
+ // and then "WatchChan" is closed with non-nil "Err()".
+ // In order to prevent a watch stream being stuck in a partitioned node,
+ // make sure to wrap context with "WithRequireLeader".
+ //
+ // Otherwise, as long as the context has not been canceled or timed out,
+ // watch will retry on other recoverable errors forever until reconnected.
+ //
+ // TODO: explicitly set context error in the last "WatchResponse" message and close channel?
+ // Currently, client contexts are overwritten with "valCtx" that never closes.
+ // TODO(v3.4): configure watch retry policy, limit maximum retry number
+ // (see https://github.com/etcd-io/etcd/issues/8980)
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+ // RequestProgress requests a progress notify response be sent in all watch channels.
+ RequestProgress(ctx context.Context) error
+
// Close closes the watcher and cancels all watch requests.
Close() error
}
@@ -134,7 +160,7 @@
resuming []*watcherStream
// reqc sends a watch request from Watch() to the main goroutine
- reqc chan *watchRequest
+ reqc chan watchStreamRequest
// respc receives data from the watch client
respc chan *pb.WatchResponse
// donec closes to broadcast shutdown
@@ -152,16 +178,27 @@
closeErr error
}
+// watchStreamRequest is a union of the supported watch request operation types
+type watchStreamRequest interface {
+ toPB() *pb.WatchRequest
+}
+
// watchRequest is issued by the subscriber to start a new watcher
type watchRequest struct {
ctx context.Context
key string
end string
rev int64
+
// send created notification event if this field is true
createdNotify bool
// progressNotify is for progress updates
progressNotify bool
+ // fragment is disabled by default; if true, watch events are split
+ // when their total size exceeds the "--max-request-bytes" flag value
+ // plus a 512-byte overhead
+ fragment bool
+
// filters is the list of events to filter out
filters []pb.WatchCreateRequest_FilterType
// get the previous key-value pair before the event happens
@@ -170,6 +207,10 @@
retc chan chan WatchResponse
}
+// progressRequest is issued by the subscriber to request watch progress
+type progressRequest struct {
+}
+
// watcherStream represents a registered watcher
type watcherStream struct {
// initReq is the request that initiated this request
@@ -227,7 +268,7 @@
cancel: cancel,
substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
- reqc: make(chan *watchRequest),
+ reqc: make(chan watchStreamRequest),
donec: make(chan struct{}),
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
@@ -256,6 +297,7 @@
end: string(ow.end),
rev: ow.rev,
progressNotify: ow.progressNotify,
+ fragment: ow.fragment,
filters: filters,
prevKV: ow.prevKV,
retc: make(chan chan WatchResponse, 1),
@@ -292,7 +334,7 @@
case <-wr.ctx.Done():
case <-donec:
if wgs.closeErr != nil {
- closeCh <- WatchResponse{closeErr: wgs.closeErr}
+ closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
@@ -307,7 +349,7 @@
case <-ctx.Done():
case <-donec:
if wgs.closeErr != nil {
- closeCh <- WatchResponse{closeErr: wgs.closeErr}
+ closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
@@ -332,6 +374,42 @@
return err
}
+// RequestProgress requests a progress notify response be sent in all watch channels.
+func (w *watcher) RequestProgress(ctx context.Context) (err error) {
+ ctxKey := streamKeyFromCtx(ctx)
+
+ w.mu.Lock()
+	if w.streams == nil {
+		// release the lock before returning so the watcher mutex is not left held
+		w.mu.Unlock()
+		return fmt.Errorf("no stream found for context")
+	}
+ wgs := w.streams[ctxKey]
+ if wgs == nil {
+ wgs = w.newWatcherGrpcStream(ctx)
+ w.streams[ctxKey] = wgs
+ }
+ donec := wgs.donec
+ reqc := wgs.reqc
+ w.mu.Unlock()
+
+ pr := &progressRequest{}
+
+ select {
+ case reqc <- pr:
+ return nil
+ case <-ctx.Done():
+ if err == nil {
+ return ctx.Err()
+ }
+ return err
+ case <-donec:
+ if wgs.closeErr != nil {
+ return wgs.closeErr
+ }
+ // retry; may have dropped stream from no ctxs
+ return w.RequestProgress(ctx)
+ }
+}
+
func (w *watchGrpcStream) close() (err error) {
w.cancel()
<-w.donec
@@ -353,7 +431,9 @@
}
func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
- if resp.WatchId == -1 {
+ // check watch ID for backward compatibility (<= v3.3)
+ if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
+ w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
// failed; no channel
close(ws.recvc)
return
@@ -379,7 +459,7 @@
}
// close subscriber's channel
if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
- go w.sendCloseSubstream(ws, &WatchResponse{closeErr: w.closeErr})
+ go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
} else if ws.outc != nil {
close(ws.outc)
}
@@ -434,31 +514,48 @@
cancelSet := make(map[int64]struct{})
+ var cur *pb.WatchResponse
for {
select {
// Watch() requested
- case wreq := <-w.reqc:
- outc := make(chan WatchResponse, 1)
- ws := &watcherStream{
- initReq: *wreq,
- id: -1,
- outc: outc,
- // unbuffered so resumes won't cause repeat events
- recvc: make(chan *WatchResponse),
+ case req := <-w.reqc:
+ switch wreq := req.(type) {
+ case *watchRequest:
+ outc := make(chan WatchResponse, 1)
+ // TODO: pass custom watch ID?
+ ws := &watcherStream{
+ initReq: *wreq,
+ id: -1,
+ outc: outc,
+ // unbuffered so resumes won't cause repeat events
+ recvc: make(chan *WatchResponse),
+ }
+
+ ws.donec = make(chan struct{})
+ w.wg.Add(1)
+ go w.serveSubstream(ws, w.resumec)
+
+ // queue up for watcher creation/resume
+ w.resuming = append(w.resuming, ws)
+ if len(w.resuming) == 1 {
+ // head of resume queue, can register a new watcher
+ wc.Send(ws.initReq.toPB())
+ }
+ case *progressRequest:
+ wc.Send(wreq.toPB())
}
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
-
- // queue up for watcher creation/resume
- w.resuming = append(w.resuming, ws)
- if len(w.resuming) == 1 {
- // head of resume queue, can register a new watcher
- wc.Send(ws.initReq.toPB())
- }
- // New events from the watch client
+ // new events from the watch client
case pbresp := <-w.respc:
+ if cur == nil || pbresp.Created || pbresp.Canceled {
+ cur = pbresp
+ } else if cur != nil && cur.WatchId == pbresp.WatchId {
+ // merge new events
+ cur.Events = append(cur.Events, pbresp.Events...)
+ // update "Fragment" field; last response with "Fragment" == false
+ cur.Fragment = pbresp.Fragment
+ }
+
switch {
case pbresp.Created:
// response to head of queue creation
@@ -467,9 +564,14 @@
w.dispatchEvent(pbresp)
w.resuming[0] = nil
}
+
if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB())
}
+
+ // reset for next iteration
+ cur = nil
+
case pbresp.Canceled && pbresp.CompactRevision == 0:
delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok {
@@ -477,15 +579,31 @@
close(ws.recvc)
closing[ws] = struct{}{}
}
+
+ // reset for next iteration
+ cur = nil
+
+ case cur.Fragment:
+ // watch response events are still fragmented
+ // continue to fetch next fragmented event arrival
+ continue
+
default:
// dispatch to appropriate watch stream
- if ok := w.dispatchEvent(pbresp); ok {
+ ok := w.dispatchEvent(cur)
+
+ // reset for next iteration
+ cur = nil
+
+ if ok {
break
}
+
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
+
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
@@ -495,6 +613,7 @@
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
+
// watch client failed on Recv; spawn another if possible
case err := <-w.errc:
if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
@@ -508,13 +627,15 @@
wc.Send(ws.initReq.toPB())
}
cancelSet = make(map[int64]struct{})
+
case <-w.ctx.Done():
return
+
case ws := <-w.closingc:
w.closeSubstream(ws)
delete(closing, ws)
+ // no more watchers on this stream, shutdown
if len(w.substreams)+len(w.resuming) == 0 {
- // no more watchers on this stream, shutdown
return
}
}
@@ -539,6 +660,7 @@
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
}
+ // TODO: return watch ID?
wr := &WatchResponse{
Header: *pbresp.Header,
Events: events,
@@ -547,7 +669,31 @@
Canceled: pbresp.Canceled,
cancelReason: pbresp.CancelReason,
}
- ws, ok := w.substreams[pbresp.WatchId]
+
+	// watch IDs are zero-indexed, so requested progress notify responses are assigned a watch ID of -1 to
+	// indicate they should be broadcast.
+ if wr.IsProgressNotify() && pbresp.WatchId == -1 {
+ return w.broadcastResponse(wr)
+ }
+
+ return w.unicastResponse(wr, pbresp.WatchId)
+}
+
+// broadcastResponse sends a watch response to all watch substreams.
+func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
+ for _, ws := range w.substreams {
+ select {
+ case ws.recvc <- wr:
+ case <-ws.donec:
+ }
+ }
+ return true
+}
+
+// unicastResponse sends a watch response to a specific watch substream.
+func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
+ ws, ok := w.substreams[watchId]
if !ok {
return false
}
@@ -815,11 +961,19 @@
ProgressNotify: wr.progressNotify,
Filters: wr.filters,
PrevKv: wr.prevKV,
+ Fragment: wr.fragment,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
+// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
+func (pr *progressRequest) toPB() *pb.WatchRequest {
+ req := &pb.WatchProgressRequest{}
+ cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
+ return &pb.WatchRequest{RequestUnion: cr}
+}
+
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
return fmt.Sprintf("%+v", md)
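Putting the documented watch semantics together, the following client-side sketch (assuming the public clientv3 API; the key prefix and function names are illustrative) wraps the context with WithRequireLeader, cancels it when the watcher is no longer needed, and consumes events until the channel closes or reports a non-recoverable error.

package example // illustrative usage only, not part of the vendored code

import (
	"context"

	clientv3 "go.etcd.io/etcd/clientv3"
)

func watchExample(cli *clientv3.Client) error {
	// Wrap the context with WithRequireLeader so the stream cannot get stuck on a
	// partitioned member, and cancel it once the watcher is no longer being used.
	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel()

	wch := cli.Watch(ctx, "service/", clientv3.WithPrefix(), clientv3.WithProgressNotify())
	for wresp := range wch {
		if err := wresp.Err(); err != nil {
			return err // e.g. ErrCompacted, or "etcdserver: no leader" with WithRequireLeader
		}
		for _, ev := range wresp.Events {
			_ = ev // handle PUT/DELETE events here
		}
	}
	// The channel was closed, typically because ctx was canceled.
	return ctx.Err()
}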
diff --git a/vendor/go.etcd.io/etcd/cmd/etcd b/vendor/go.etcd.io/etcd/cmd/etcd
deleted file mode 120000
index b870225..0000000
--- a/vendor/go.etcd.io/etcd/cmd/etcd
+++ /dev/null
@@ -1 +0,0 @@
-../
\ No newline at end of file
diff --git a/vendor/go.etcd.io/etcd/cmd/etcdctl b/vendor/go.etcd.io/etcd/cmd/etcdctl
deleted file mode 120000
index 05bb269..0000000
--- a/vendor/go.etcd.io/etcd/cmd/etcdctl
+++ /dev/null
@@ -1 +0,0 @@
-../etcdctl
\ No newline at end of file
diff --git a/vendor/go.etcd.io/etcd/cmd/functional b/vendor/go.etcd.io/etcd/cmd/functional
deleted file mode 120000
index 44faa31..0000000
--- a/vendor/go.etcd.io/etcd/cmd/functional
+++ /dev/null
@@ -1 +0,0 @@
-../functional
\ No newline at end of file
diff --git a/vendor/go.etcd.io/etcd/cmd/tools b/vendor/go.etcd.io/etcd/cmd/tools
deleted file mode 120000
index 4887d6e..0000000
--- a/vendor/go.etcd.io/etcd/cmd/tools
+++ /dev/null
@@ -1 +0,0 @@
-../tools
\ No newline at end of file
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
index 55eab38..9e45cea 100644
--- a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
@@ -61,6 +61,7 @@
ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
+ ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
@@ -111,6 +112,7 @@
ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
+ ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged,
ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
@@ -163,6 +165,7 @@
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotLeader = Error(ErrGRPCNotLeader)
+ ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
ErrNotCapable = Error(ErrGRPCNotCapable)
ErrStopped = Error(ErrGRPCStopped)
ErrTimeout = Error(ErrGRPCTimeout)
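The new ErrLeaderChanged value can be matched the same way the retry interceptor above matches ErrInvalidAuthToken; a minimal sketch follows (the helper name and the retry decision are illustrative, not part of this change).

// shouldSwitchEndpoint reports whether the error indicates that the member serving
// the request lost leadership, in which case retrying against another endpoint is
// usually safe. rpctypes is the package extended above.
func shouldSwitchEndpoint(err error) bool {
	switch rpctypes.Error(err) {
	case rpctypes.ErrLeaderChanged, rpctypes.ErrNoLeader:
		return true
	default:
		return false
	}
}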
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
new file mode 100644
index 0000000..8f8ac60
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+ TokenFieldNameGRPC = "token"
+ TokenFieldNameSwagger = "authorization"
+)
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
new file mode 100644
index 0000000..f5134b9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
@@ -0,0 +1,1039 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: etcdserver.proto
+
+/*
+ Package etcdserverpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ etcdserver.proto
+ raft_internal.proto
+ rpc.proto
+
+ It has these top-level messages:
+ Request
+ Metadata
+ RequestHeader
+ InternalRaftRequest
+ EmptyResponse
+ InternalAuthenticateRequest
+ ResponseHeader
+ RangeRequest
+ RangeResponse
+ PutRequest
+ PutResponse
+ DeleteRangeRequest
+ DeleteRangeResponse
+ RequestOp
+ ResponseOp
+ Compare
+ TxnRequest
+ TxnResponse
+ CompactionRequest
+ CompactionResponse
+ HashRequest
+ HashKVRequest
+ HashKVResponse
+ HashResponse
+ SnapshotRequest
+ SnapshotResponse
+ WatchRequest
+ WatchCreateRequest
+ WatchCancelRequest
+ WatchProgressRequest
+ WatchResponse
+ LeaseGrantRequest
+ LeaseGrantResponse
+ LeaseRevokeRequest
+ LeaseRevokeResponse
+ LeaseCheckpoint
+ LeaseCheckpointRequest
+ LeaseCheckpointResponse
+ LeaseKeepAliveRequest
+ LeaseKeepAliveResponse
+ LeaseTimeToLiveRequest
+ LeaseTimeToLiveResponse
+ LeaseLeasesRequest
+ LeaseStatus
+ LeaseLeasesResponse
+ Member
+ MemberAddRequest
+ MemberAddResponse
+ MemberRemoveRequest
+ MemberRemoveResponse
+ MemberUpdateRequest
+ MemberUpdateResponse
+ MemberListRequest
+ MemberListResponse
+ DefragmentRequest
+ DefragmentResponse
+ MoveLeaderRequest
+ MoveLeaderResponse
+ AlarmRequest
+ AlarmMember
+ AlarmResponse
+ StatusRequest
+ StatusResponse
+ AuthEnableRequest
+ AuthDisableRequest
+ AuthenticateRequest
+ AuthUserAddRequest
+ AuthUserGetRequest
+ AuthUserDeleteRequest
+ AuthUserChangePasswordRequest
+ AuthUserGrantRoleRequest
+ AuthUserRevokeRoleRequest
+ AuthRoleAddRequest
+ AuthRoleGetRequest
+ AuthUserListRequest
+ AuthRoleListRequest
+ AuthRoleDeleteRequest
+ AuthRoleGrantPermissionRequest
+ AuthRoleRevokePermissionRequest
+ AuthEnableResponse
+ AuthDisableResponse
+ AuthenticateResponse
+ AuthUserAddResponse
+ AuthUserGetResponse
+ AuthUserDeleteResponse
+ AuthUserChangePasswordResponse
+ AuthUserGrantRoleResponse
+ AuthUserRevokeRoleResponse
+ AuthRoleAddResponse
+ AuthRoleGetResponse
+ AuthRoleListResponse
+ AuthUserListResponse
+ AuthRoleDeleteResponse
+ AuthRoleGrantPermissionResponse
+ AuthRoleRevokePermissionResponse
+*/
+package etcdserverpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Request struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+ Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+ Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+ Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+ Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+ PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+ PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+ PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+ Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+ Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+ Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
+ Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+ Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+ Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+ Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
+ Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
+ Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{0} }
+
+type Metadata struct {
+ NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+ ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) { return fileDescriptorEtcdserver, []int{1} }
+
+func init() {
+ proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
+ proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
+}
+func (m *Request) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
+ i += copy(dAtA[i:], m.Method)
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
+ i += copy(dAtA[i:], m.Path)
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
+ i += copy(dAtA[i:], m.Val)
+ dAtA[i] = 0x28
+ i++
+ if m.Dir {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
+ i += copy(dAtA[i:], m.PrevValue)
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
+ if m.PrevExist != nil {
+ dAtA[i] = 0x40
+ i++
+ if *m.PrevExist {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ dAtA[i] = 0x48
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
+ dAtA[i] = 0x50
+ i++
+ if m.Wait {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x58
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
+ dAtA[i] = 0x60
+ i++
+ if m.Recursive {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x68
+ i++
+ if m.Sorted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x70
+ i++
+ if m.Quorum {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x78
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
+ dAtA[i] = 0x80
+ i++
+ dAtA[i] = 0x1
+ i++
+ if m.Stream {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ if m.Refresh != nil {
+ dAtA[i] = 0x88
+ i++
+ dAtA[i] = 0x1
+ i++
+ if *m.Refresh {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Request) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovEtcdserver(uint64(m.ID))
+ l = len(m.Method)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ l = len(m.Val)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ n += 2
+ l = len(m.PrevValue)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ n += 1 + sovEtcdserver(uint64(m.PrevIndex))
+ if m.PrevExist != nil {
+ n += 2
+ }
+ n += 1 + sovEtcdserver(uint64(m.Expiration))
+ n += 2
+ n += 1 + sovEtcdserver(uint64(m.Since))
+ n += 2
+ n += 2
+ n += 2
+ n += 1 + sovEtcdserver(uint64(m.Time))
+ n += 3
+ if m.Refresh != nil {
+ n += 3
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Metadata) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovEtcdserver(uint64(m.NodeID))
+ n += 1 + sovEtcdserver(uint64(m.ClusterID))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovEtcdserver(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozEtcdserver(x uint64) (n int) {
+ return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Request: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Method = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Val = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Dir = bool(v != 0)
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PrevValue = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
+ }
+ m.PrevIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PrevIndex |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.PrevExist = &b
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+ }
+ m.Expiration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Expiration |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Wait = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
+ }
+ m.Since = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Since |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Recursive = bool(v != 0)
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sorted = bool(v != 0)
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Quorum = bool(v != 0)
+ case 15:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+ }
+ m.Time = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Time |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 16:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stream = bool(v != 0)
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Refresh = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEtcdserver(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NodeID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+ }
+ m.ClusterID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ClusterID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEtcdserver(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipEtcdserver(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthEtcdserver
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipEtcdserver(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) }
+
+var fileDescriptorEtcdserver = []byte{
+ // 380 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
+ 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
+ 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
+ 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
+ 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
+ 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
+ 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
+ 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
+ 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
+ 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
+ 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
+ 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
+ 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
+ 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
+ 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
+ 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
+ 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
+ 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
+ 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
+ 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
+ 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
+ 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
+ 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
+ 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto
new file mode 100644
index 0000000..25e0aca
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/etcdserver.proto
@@ -0,0 +1,34 @@
+syntax = "proto2";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Request {
+ optional uint64 ID = 1 [(gogoproto.nullable) = false];
+ optional string Method = 2 [(gogoproto.nullable) = false];
+ optional string Path = 3 [(gogoproto.nullable) = false];
+ optional string Val = 4 [(gogoproto.nullable) = false];
+ optional bool Dir = 5 [(gogoproto.nullable) = false];
+ optional string PrevValue = 6 [(gogoproto.nullable) = false];
+ optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false];
+ optional bool PrevExist = 8 [(gogoproto.nullable) = true];
+ optional int64 Expiration = 9 [(gogoproto.nullable) = false];
+ optional bool Wait = 10 [(gogoproto.nullable) = false];
+ optional uint64 Since = 11 [(gogoproto.nullable) = false];
+ optional bool Recursive = 12 [(gogoproto.nullable) = false];
+ optional bool Sorted = 13 [(gogoproto.nullable) = false];
+ optional bool Quorum = 14 [(gogoproto.nullable) = false];
+ optional int64 Time = 15 [(gogoproto.nullable) = false];
+ optional bool Stream = 16 [(gogoproto.nullable) = false];
+ optional bool Refresh = 17 [(gogoproto.nullable) = true];
+}
+
+message Metadata {
+ optional uint64 NodeID = 1 [(gogoproto.nullable) = false];
+ optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
new file mode 100644
index 0000000..b170499
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
@@ -0,0 +1,2127 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft_internal.proto
+
+package etcdserverpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RequestHeader struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // username is a username that is associated with an auth token of gRPC connection
+ Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
+ // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+ AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
+}
+
+func (m *RequestHeader) Reset() { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage() {}
+func (*RequestHeader) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{0} }
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+type InternalRaftRequest struct {
+ Header *RequestHeader `protobuf:"bytes,100,opt,name=header" json:"header,omitempty"`
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ V2 *Request `protobuf:"bytes,2,opt,name=v2" json:"v2,omitempty"`
+ Range *RangeRequest `protobuf:"bytes,3,opt,name=range" json:"range,omitempty"`
+ Put *PutRequest `protobuf:"bytes,4,opt,name=put" json:"put,omitempty"`
+ DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange" json:"delete_range,omitempty"`
+ Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn" json:"txn,omitempty"`
+ Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction" json:"compaction,omitempty"`
+ LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant" json:"lease_grant,omitempty"`
+ LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke" json:"lease_revoke,omitempty"`
+ Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm" json:"alarm,omitempty"`
+ LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint" json:"lease_checkpoint,omitempty"`
+ AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable" json:"auth_enable,omitempty"`
+ AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable" json:"auth_disable,omitempty"`
+ Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate" json:"authenticate,omitempty"`
+ AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd" json:"auth_user_add,omitempty"`
+ AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete" json:"auth_user_delete,omitempty"`
+ AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet" json:"auth_user_get,omitempty"`
+ AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword" json:"auth_user_change_password,omitempty"`
+ AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole" json:"auth_user_grant_role,omitempty"`
+ AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole" json:"auth_user_revoke_role,omitempty"`
+ AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList" json:"auth_user_list,omitempty"`
+ AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList" json:"auth_role_list,omitempty"`
+ AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd" json:"auth_role_add,omitempty"`
+ AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete" json:"auth_role_delete,omitempty"`
+ AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet" json:"auth_role_get,omitempty"`
+ AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission" json:"auth_role_grant_permission,omitempty"`
+ AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission" json:"auth_role_revoke_permission,omitempty"`
+}
+
+func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage() {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{1} }
+
+type EmptyResponse struct {
+}
+
+func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
+func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
+func (*EmptyResponse) ProtoMessage() {}
+func (*EmptyResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaftInternal, []int{2} }
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we have an internal version of AuthenticateRequest.
+type InternalAuthenticateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ // simple_token is generated in API layer (etcdserver/v3_server.go)
+ SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
+}
+
+func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} }
+func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalAuthenticateRequest) ProtoMessage() {}
+func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRaftInternal, []int{3}
+}
+
+func init() {
+ proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
+ proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
+ proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
+ proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
+}
+func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+ }
+ if len(m.Username) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
+ i += copy(dAtA[i:], m.Username)
+ }
+ if m.AuthRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
+ }
+ return i, nil
+}
+
+func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+ }
+ if m.V2 != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.V2.Size()))
+ n1, err := m.V2.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.Range != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Range.Size()))
+ n2, err := m.Range.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if m.Put != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Put.Size()))
+ n3, err := m.Put.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ if m.DeleteRange != nil {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.DeleteRange.Size()))
+ n4, err := m.DeleteRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ if m.Txn != nil {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Txn.Size()))
+ n5, err := m.Txn.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ if m.Compaction != nil {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Compaction.Size()))
+ n6, err := m.Compaction.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ if m.LeaseGrant != nil {
+ dAtA[i] = 0x42
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseGrant.Size()))
+ n7, err := m.LeaseGrant.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ if m.LeaseRevoke != nil {
+ dAtA[i] = 0x4a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseRevoke.Size()))
+ n8, err := m.LeaseRevoke.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ if m.Alarm != nil {
+ dAtA[i] = 0x52
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Alarm.Size()))
+ n9, err := m.Alarm.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ if m.LeaseCheckpoint != nil {
+ dAtA[i] = 0x5a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.LeaseCheckpoint.Size()))
+ n10, err := m.LeaseCheckpoint.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n10
+ }
+ if m.Header != nil {
+ dAtA[i] = 0xa2
+ i++
+ dAtA[i] = 0x6
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Header.Size()))
+ n11, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ }
+ if m.AuthEnable != nil {
+ dAtA[i] = 0xc2
+ i++
+ dAtA[i] = 0x3e
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthEnable.Size()))
+ n12, err := m.AuthEnable.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ if m.AuthDisable != nil {
+ dAtA[i] = 0x9a
+ i++
+ dAtA[i] = 0x3f
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthDisable.Size()))
+ n13, err := m.AuthDisable.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ if m.Authenticate != nil {
+ dAtA[i] = 0xa2
+ i++
+ dAtA[i] = 0x3f
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.Authenticate.Size()))
+ n14, err := m.Authenticate.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ if m.AuthUserAdd != nil {
+ dAtA[i] = 0xe2
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserAdd.Size()))
+ n15, err := m.AuthUserAdd.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n15
+ }
+ if m.AuthUserDelete != nil {
+ dAtA[i] = 0xea
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserDelete.Size()))
+ n16, err := m.AuthUserDelete.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ if m.AuthUserGet != nil {
+ dAtA[i] = 0xf2
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGet.Size()))
+ n17, err := m.AuthUserGet.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ if m.AuthUserChangePassword != nil {
+ dAtA[i] = 0xfa
+ i++
+ dAtA[i] = 0x44
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserChangePassword.Size()))
+ n18, err := m.AuthUserChangePassword.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ if m.AuthUserGrantRole != nil {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserGrantRole.Size()))
+ n19, err := m.AuthUserGrantRole.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ }
+ if m.AuthUserRevokeRole != nil {
+ dAtA[i] = 0x8a
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserRevokeRole.Size()))
+ n20, err := m.AuthUserRevokeRole.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ if m.AuthUserList != nil {
+ dAtA[i] = 0x92
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthUserList.Size()))
+ n21, err := m.AuthUserList.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n21
+ }
+ if m.AuthRoleList != nil {
+ dAtA[i] = 0x9a
+ i++
+ dAtA[i] = 0x45
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleList.Size()))
+ n22, err := m.AuthRoleList.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ }
+ if m.AuthRoleAdd != nil {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleAdd.Size()))
+ n23, err := m.AuthRoleAdd.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ }
+ if m.AuthRoleDelete != nil {
+ dAtA[i] = 0x8a
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleDelete.Size()))
+ n24, err := m.AuthRoleDelete.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ }
+ if m.AuthRoleGet != nil {
+ dAtA[i] = 0x92
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGet.Size()))
+ n25, err := m.AuthRoleGet.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n25
+ }
+ if m.AuthRoleGrantPermission != nil {
+ dAtA[i] = 0x9a
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleGrantPermission.Size()))
+ n26, err := m.AuthRoleGrantPermission.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n26
+ }
+ if m.AuthRoleRevokePermission != nil {
+ dAtA[i] = 0xa2
+ i++
+ dAtA[i] = 0x4b
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRoleRevokePermission.Size()))
+ n27, err := m.AuthRoleRevokePermission.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ }
+ return i, nil
+}
+
+func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ if len(m.SimpleToken) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
+ i += copy(dAtA[i:], m.SimpleToken)
+ }
+ return i, nil
+}
+
+func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *RequestHeader) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaftInternal(uint64(m.ID))
+ }
+ l = len(m.Username)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRevision != 0 {
+ n += 1 + sovRaftInternal(uint64(m.AuthRevision))
+ }
+ return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaftInternal(uint64(m.ID))
+ }
+ if m.V2 != nil {
+ l = m.V2.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Range != nil {
+ l = m.Range.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Put != nil {
+ l = m.Put.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.DeleteRange != nil {
+ l = m.DeleteRange.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Txn != nil {
+ l = m.Txn.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Compaction != nil {
+ l = m.Compaction.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseGrant != nil {
+ l = m.LeaseGrant.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseRevoke != nil {
+ l = m.LeaseRevoke.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Alarm != nil {
+ l = m.Alarm.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseCheckpoint != nil {
+ l = m.LeaseCheckpoint.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthEnable != nil {
+ l = m.AuthEnable.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthDisable != nil {
+ l = m.AuthDisable.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Authenticate != nil {
+ l = m.Authenticate.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserAdd != nil {
+ l = m.AuthUserAdd.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserDelete != nil {
+ l = m.AuthUserDelete.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserGet != nil {
+ l = m.AuthUserGet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserChangePassword != nil {
+ l = m.AuthUserChangePassword.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserGrantRole != nil {
+ l = m.AuthUserGrantRole.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserRevokeRole != nil {
+ l = m.AuthUserRevokeRole.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserList != nil {
+ l = m.AuthUserList.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleList != nil {
+ l = m.AuthRoleList.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleAdd != nil {
+ l = m.AuthRoleAdd.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleDelete != nil {
+ l = m.AuthRoleDelete.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleGet != nil {
+ l = m.AuthRoleGet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleGrantPermission != nil {
+ l = m.AuthRoleGrantPermission.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleRevokePermission != nil {
+ l = m.AuthRoleRevokePermission.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ return n
+}
+
+func (m *EmptyResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *InternalAuthenticateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ l = len(m.SimpleToken)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ return n
+}
+
+func sovRaftInternal(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRaftInternal(x uint64) (n int) {
+ return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RequestHeader) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Username = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+ }
+ m.AuthRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AuthRevision |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.V2 == nil {
+ m.V2 = &Request{}
+ }
+ if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Range == nil {
+ m.Range = &RangeRequest{}
+ }
+ if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Put == nil {
+ m.Put = &PutRequest{}
+ }
+ if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DeleteRange == nil {
+ m.DeleteRange = &DeleteRangeRequest{}
+ }
+ if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Txn == nil {
+ m.Txn = &TxnRequest{}
+ }
+ if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Compaction == nil {
+ m.Compaction = &CompactionRequest{}
+ }
+ if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseGrant == nil {
+ m.LeaseGrant = &LeaseGrantRequest{}
+ }
+ if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseRevoke == nil {
+ m.LeaseRevoke = &LeaseRevokeRequest{}
+ }
+ if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alarm == nil {
+ m.Alarm = &AlarmRequest{}
+ }
+ if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseCheckpoint == nil {
+ m.LeaseCheckpoint = &LeaseCheckpointRequest{}
+ }
+ if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 100:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &RequestHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1000:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthEnable == nil {
+ m.AuthEnable = &AuthEnableRequest{}
+ }
+ if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1011:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthDisable == nil {
+ m.AuthDisable = &AuthDisableRequest{}
+ }
+ if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1012:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Authenticate == nil {
+ m.Authenticate = &InternalAuthenticateRequest{}
+ }
+ if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1100:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserAdd == nil {
+ m.AuthUserAdd = &AuthUserAddRequest{}
+ }
+ if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1101:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserDelete == nil {
+ m.AuthUserDelete = &AuthUserDeleteRequest{}
+ }
+ if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1102:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserGet == nil {
+ m.AuthUserGet = &AuthUserGetRequest{}
+ }
+ if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1103:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserChangePassword == nil {
+ m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
+ }
+ if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1104:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserGrantRole == nil {
+ m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
+ }
+ if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1105:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserRevokeRole == nil {
+ m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
+ }
+ if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1106:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserList == nil {
+ m.AuthUserList = &AuthUserListRequest{}
+ }
+ if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1107:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleList == nil {
+ m.AuthRoleList = &AuthRoleListRequest{}
+ }
+ if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1200:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleAdd == nil {
+ m.AuthRoleAdd = &AuthRoleAddRequest{}
+ }
+ if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1201:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleDelete == nil {
+ m.AuthRoleDelete = &AuthRoleDeleteRequest{}
+ }
+ if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1202:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleGet == nil {
+ m.AuthRoleGet = &AuthRoleGetRequest{}
+ }
+ if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1203:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleGrantPermission == nil {
+ m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
+ }
+ if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1204:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleRevokePermission == nil {
+ m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
+ }
+ if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SimpleToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRaftInternal(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRaftInternal
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRaftInternal(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftInternal) }
+
+var fileDescriptorRaftInternal = []byte{
+ // 867 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xcb, 0x52, 0x33, 0x45,
+ 0x14, 0xc7, 0x49, 0xb8, 0xa6, 0x13, 0x2e, 0x36, 0xa0, 0x6d, 0xa8, 0x8a, 0x10, 0xbc, 0xe0, 0x0d,
+ 0xad, 0xf0, 0x00, 0x1a, 0x13, 0x0a, 0xa8, 0xa2, 0x90, 0x9a, 0xc2, 0x2a, 0xab, 0x5c, 0x8c, 0xcd,
+ 0xcc, 0x21, 0x19, 0x99, 0xcc, 0x8c, 0x3d, 0x9d, 0x88, 0x6f, 0xe2, 0x63, 0x78, 0xdb, 0xbb, 0x65,
+ 0xe1, 0x05, 0xf5, 0x05, 0x14, 0x37, 0xee, 0xbf, 0xef, 0x01, 0xbe, 0xea, 0xcb, 0xf4, 0x64, 0x92,
+ 0x0e, 0xbb, 0xc9, 0x39, 0xff, 0xf3, 0xfb, 0x9f, 0x99, 0x3e, 0x07, 0x1a, 0x6d, 0x32, 0x7a, 0xc3,
+ 0xdd, 0x20, 0xe2, 0xc0, 0x22, 0x1a, 0x1e, 0x26, 0x2c, 0xe6, 0x31, 0xae, 0x01, 0xf7, 0xfc, 0x14,
+ 0xd8, 0x08, 0x58, 0x72, 0x5d, 0xdf, 0xea, 0xc5, 0xbd, 0x58, 0x26, 0x3e, 0x10, 0x4f, 0x4a, 0x53,
+ 0xdf, 0xc8, 0x35, 0x3a, 0x52, 0x61, 0x89, 0xa7, 0x1e, 0x9b, 0x5f, 0xa2, 0x55, 0x07, 0xbe, 0x1e,
+ 0x42, 0xca, 0x4f, 0x81, 0xfa, 0xc0, 0xf0, 0x1a, 0x2a, 0x9f, 0x75, 0x49, 0x69, 0xb7, 0x74, 0xb0,
+ 0xe0, 0x94, 0xcf, 0xba, 0xb8, 0x8e, 0x56, 0x86, 0xa9, 0xb0, 0x1c, 0x00, 0x29, 0xef, 0x96, 0x0e,
+ 0x2a, 0x8e, 0xf9, 0x8d, 0xf7, 0xd1, 0x2a, 0x1d, 0xf2, 0xbe, 0xcb, 0x60, 0x14, 0xa4, 0x41, 0x1c,
+ 0x91, 0x79, 0x59, 0x56, 0x13, 0x41, 0x47, 0xc7, 0x9a, 0xbf, 0xac, 0xa3, 0xcd, 0x33, 0xdd, 0xb5,
+ 0x43, 0x6f, 0xb8, 0xb6, 0x9b, 0x32, 0x7a, 0x03, 0x95, 0x47, 0x2d, 0x69, 0x51, 0x6d, 0x6d, 0x1f,
+ 0x8e, 0xbf, 0xd7, 0xa1, 0x2e, 0x71, 0xca, 0xa3, 0x16, 0xfe, 0x10, 0x2d, 0x32, 0x1a, 0xf5, 0x40,
+ 0x7a, 0x55, 0x5b, 0xf5, 0x09, 0xa5, 0x48, 0x65, 0x72, 0x25, 0xc4, 0xef, 0xa0, 0xf9, 0x64, 0xc8,
+ 0xc9, 0x82, 0xd4, 0x93, 0xa2, 0xfe, 0x72, 0x98, 0xf5, 0xe3, 0x08, 0x11, 0xee, 0xa0, 0x9a, 0x0f,
+ 0x21, 0x70, 0x70, 0x95, 0xc9, 0xa2, 0x2c, 0xda, 0x2d, 0x16, 0x75, 0xa5, 0xa2, 0x60, 0x55, 0xf5,
+ 0xf3, 0x98, 0x30, 0xe4, 0x77, 0x11, 0x59, 0xb2, 0x19, 0x5e, 0xdd, 0x45, 0xc6, 0x90, 0xdf, 0x45,
+ 0xf8, 0x23, 0x84, 0xbc, 0x78, 0x90, 0x50, 0x8f, 0x8b, 0xef, 0xb7, 0x2c, 0x4b, 0x5e, 0x2b, 0x96,
+ 0x74, 0x4c, 0x3e, 0xab, 0x1c, 0x2b, 0xc1, 0x1f, 0xa3, 0x6a, 0x08, 0x34, 0x05, 0xb7, 0xc7, 0x68,
+ 0xc4, 0xc9, 0x8a, 0x8d, 0x70, 0x2e, 0x04, 0x27, 0x22, 0x6f, 0x08, 0xa1, 0x09, 0x89, 0x77, 0x56,
+ 0x04, 0x06, 0xa3, 0xf8, 0x16, 0x48, 0xc5, 0xf6, 0xce, 0x12, 0xe1, 0x48, 0x81, 0x79, 0xe7, 0x30,
+ 0x8f, 0x89, 0x63, 0xa1, 0x21, 0x65, 0x03, 0x82, 0x6c, 0xc7, 0xd2, 0x16, 0x29, 0x73, 0x2c, 0x52,
+ 0x88, 0x3f, 0x45, 0x1b, 0xca, 0xd6, 0xeb, 0x83, 0x77, 0x9b, 0xc4, 0x41, 0xc4, 0x49, 0x55, 0x16,
+ 0xbf, 0x6e, 0xb1, 0xee, 0x18, 0x51, 0x86, 0x59, 0x0f, 0x8b, 0x71, 0x7c, 0x84, 0x96, 0xfa, 0x72,
+ 0x86, 0x89, 0x2f, 0x31, 0x3b, 0xd6, 0x21, 0x52, 0x63, 0xee, 0x68, 0x29, 0x6e, 0xa3, 0xaa, 0x1c,
+ 0x61, 0x88, 0xe8, 0x75, 0x08, 0xe4, 0x7f, 0xeb, 0x09, 0xb4, 0x87, 0xbc, 0x7f, 0x2c, 0x05, 0xe6,
+ 0xfb, 0x51, 0x13, 0xc2, 0x5d, 0x24, 0x07, 0xde, 0xf5, 0x83, 0x54, 0x32, 0x9e, 0x2d, 0xdb, 0x3e,
+ 0xa0, 0x60, 0x74, 0x95, 0xc2, 0x7c, 0x40, 0x9a, 0xc7, 0xf0, 0x85, 0xa2, 0x40, 0xc4, 0x03, 0x8f,
+ 0x72, 0x20, 0xcf, 0x15, 0xe5, 0xed, 0x22, 0x25, 0x5b, 0xa4, 0xf6, 0x98, 0x34, 0xc3, 0x15, 0xea,
+ 0xf1, 0xb1, 0xde, 0x4d, 0xb1, 0xac, 0x2e, 0xf5, 0x7d, 0xf2, 0xeb, 0xca, 0xac, 0xb6, 0x3e, 0x4b,
+ 0x81, 0xb5, 0x7d, 0xbf, 0xd0, 0x96, 0x8e, 0xe1, 0x0b, 0xb4, 0x91, 0x63, 0xd4, 0x90, 0x93, 0xdf,
+ 0x14, 0x69, 0xdf, 0x4e, 0xd2, 0xdb, 0xa1, 0x61, 0x6b, 0xb4, 0x10, 0x2e, 0xb6, 0xd5, 0x03, 0x4e,
+ 0x7e, 0x7f, 0xb2, 0xad, 0x13, 0xe0, 0x53, 0x6d, 0x9d, 0x00, 0xc7, 0x3d, 0xf4, 0x6a, 0x8e, 0xf1,
+ 0xfa, 0x62, 0xed, 0xdc, 0x84, 0xa6, 0xe9, 0x37, 0x31, 0xf3, 0xc9, 0x1f, 0x0a, 0xf9, 0xae, 0x1d,
+ 0xd9, 0x91, 0xea, 0x4b, 0x2d, 0xce, 0xe8, 0x2f, 0x53, 0x6b, 0x1a, 0x7f, 0x8e, 0xb6, 0xc6, 0xfa,
+ 0x15, 0xfb, 0xe2, 0xb2, 0x38, 0x04, 0xf2, 0xa0, 0x3c, 0xde, 0x9c, 0xd1, 0xb6, 0xdc, 0xb5, 0x38,
+ 0x3f, 0xea, 0x97, 0xe8, 0x64, 0x06, 0x7f, 0x81, 0xb6, 0x73, 0xb2, 0x5a, 0x3d, 0x85, 0xfe, 0x53,
+ 0xa1, 0xdf, 0xb2, 0xa3, 0xf5, 0x0e, 0x8e, 0xb1, 0x31, 0x9d, 0x4a, 0xe1, 0x53, 0xb4, 0x96, 0xc3,
+ 0xc3, 0x20, 0xe5, 0xe4, 0x2f, 0x45, 0xdd, 0xb3, 0x53, 0xcf, 0x83, 0x94, 0x17, 0xe6, 0x28, 0x0b,
+ 0x1a, 0x92, 0x68, 0x4d, 0x91, 0xfe, 0x9e, 0x49, 0x12, 0xd6, 0x53, 0xa4, 0x2c, 0x68, 0x8e, 0x5e,
+ 0x92, 0xc4, 0x44, 0x7e, 0x5f, 0x99, 0x75, 0xf4, 0xa2, 0x66, 0x72, 0x22, 0x75, 0xcc, 0x4c, 0xa4,
+ 0xc4, 0xe8, 0x89, 0xfc, 0xa1, 0x32, 0x6b, 0x22, 0x45, 0x95, 0x65, 0x22, 0xf3, 0x70, 0xb1, 0x2d,
+ 0x31, 0x91, 0x3f, 0x3e, 0xd9, 0xd6, 0xe4, 0x44, 0xea, 0x18, 0xfe, 0x0a, 0xd5, 0xc7, 0x30, 0x72,
+ 0x50, 0x12, 0x60, 0x83, 0x20, 0x95, 0xff, 0x18, 0x7f, 0x52, 0xcc, 0xf7, 0x66, 0x30, 0x85, 0xfc,
+ 0xd2, 0xa8, 0x33, 0xfe, 0x2b, 0xd4, 0x9e, 0xc7, 0x03, 0xb4, 0x93, 0x7b, 0xe9, 0xd1, 0x19, 0x33,
+ 0xfb, 0x59, 0x99, 0xbd, 0x6f, 0x37, 0x53, 0x53, 0x32, 0xed, 0x46, 0xe8, 0x0c, 0x41, 0x73, 0x1d,
+ 0xad, 0x1e, 0x0f, 0x12, 0xfe, 0xad, 0x03, 0x69, 0x12, 0x47, 0x29, 0x34, 0x13, 0xb4, 0xf3, 0xc4,
+ 0x1f, 0x22, 0x8c, 0xd1, 0x82, 0xbc, 0x2e, 0x94, 0xe4, 0x75, 0x41, 0x3e, 0x8b, 0x6b, 0x84, 0xd9,
+ 0x4f, 0x7d, 0x8d, 0xc8, 0x7e, 0xe3, 0x3d, 0x54, 0x4b, 0x83, 0x41, 0x12, 0x82, 0xcb, 0xe3, 0x5b,
+ 0x50, 0xb7, 0x88, 0x8a, 0x53, 0x55, 0xb1, 0x2b, 0x11, 0xfa, 0x64, 0xeb, 0xfe, 0xdf, 0xc6, 0xdc,
+ 0xfd, 0x63, 0xa3, 0xf4, 0xf0, 0xd8, 0x28, 0xfd, 0xf3, 0xd8, 0x28, 0x7d, 0xf7, 0x5f, 0x63, 0xee,
+ 0x7a, 0x49, 0xde, 0x61, 0x8e, 0x5e, 0x04, 0x00, 0x00, 0xff, 0xff, 0xed, 0x36, 0xf0, 0x6f, 0x1b,
+ 0x09, 0x00, 0x00,
+}
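The generated Marshal/Size/Unmarshal trio above gives InternalRaftRequest a symmetric wire round-trip: Marshal allocates a buffer of Size() bytes, MarshalTo writes each non-nil field as a varint tag plus payload, and Unmarshal reverses the walk, allocating nested messages on demand. A minimal round-trip sketch, assuming the vendored import path go.etcd.io/etcd/etcdserver/etcdserverpb introduced by this change:

package main

import (
	"fmt"
	"log"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb" // import path assumed from this vendor tree
)

func main() {
	// Field 1 (ID) is written with tag 0x8 and field 4 (Put) with tag 0x22,
	// matching the byte constants in the generated MarshalTo above.
	req := &pb.InternalRaftRequest{
		ID:  42,
		Put: &pb.PutRequest{Key: []byte("service/voltha/1"), Value: []byte("data")},
	}
	wire, err := req.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	// Unmarshal allocates the nested PutRequest on demand before delegating to it.
	var out pb.InternalRaftRequest
	if err := out.Unmarshal(wire); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.ID, string(out.Put.Key)) // 42 service/voltha/1
}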
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto
new file mode 100644
index 0000000..7111f45
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal.proto
@@ -0,0 +1,75 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcdserver.proto";
+import "rpc.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message RequestHeader {
+ uint64 ID = 1;
+ // username is the name of the user associated with the auth token of the gRPC connection
+ string username = 2;
+ // auth_revision is the revision number of auth.authStore; it is not related to the mvcc revision
+ uint64 auth_revision = 3;
+}
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+message InternalRaftRequest {
+ RequestHeader header = 100;
+ uint64 ID = 1;
+
+ Request v2 = 2;
+
+ RangeRequest range = 3;
+ PutRequest put = 4;
+ DeleteRangeRequest delete_range = 5;
+ TxnRequest txn = 6;
+ CompactionRequest compaction = 7;
+
+ LeaseGrantRequest lease_grant = 8;
+ LeaseRevokeRequest lease_revoke = 9;
+
+ AlarmRequest alarm = 10;
+
+ LeaseCheckpointRequest lease_checkpoint = 11;
+
+ AuthEnableRequest auth_enable = 1000;
+ AuthDisableRequest auth_disable = 1011;
+
+ InternalAuthenticateRequest authenticate = 1012;
+
+ AuthUserAddRequest auth_user_add = 1100;
+ AuthUserDeleteRequest auth_user_delete = 1101;
+ AuthUserGetRequest auth_user_get = 1102;
+ AuthUserChangePasswordRequest auth_user_change_password = 1103;
+ AuthUserGrantRoleRequest auth_user_grant_role = 1104;
+ AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
+ AuthUserListRequest auth_user_list = 1106;
+ AuthRoleListRequest auth_role_list = 1107;
+
+ AuthRoleAddRequest auth_role_add = 1200;
+ AuthRoleDeleteRequest auth_role_delete = 1201;
+ AuthRoleGetRequest auth_role_get = 1202;
+ AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
+ AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
+}
+
+message EmptyResponse {
+}
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To keep that field from being misused, we have an internal version of AuthenticateRequest.
+message InternalAuthenticateRequest {
+ string name = 1;
+ string password = 2;
+
+ // simple_token is generated in the API layer (etcdserver/v3_server.go)
+ string simple_token = 3;
+}
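The large field numbers in this message explain the two-byte tags emitted by the generated marshaller earlier in this change: a protobuf key is field_number<<3 | wire_type. For the header field (100, wire type 2) that is 100<<3|2 = 802 = 0x322, whose varint encoding is 0xa2 0x06 — exactly the pair written for m.Header in raft_internal.pb.go — and for authenticate (1012) it is 1012<<3|2 = 8098 = 0x1fa2, varint 0xa2 0x3f, matching the bytes written for m.Authenticate. This is also why the generated Size() charges 2 bytes of tag overhead (n += 2 + l + ...) for header and the auth fields, but only 1 byte for fields 1 through 11.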
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
new file mode 100644
index 0000000..3d3536a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+ "fmt"
+ "strings"
+
+ proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements a custom proto Stringer:
+// it redacts passwords and replaces value fields with value_size fields.
+type InternalRaftStringer struct {
+ Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+ switch {
+ case as.Request.LeaseGrant != nil:
+ return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+ as.Request.Header.String(),
+ as.Request.LeaseGrant.TTL,
+ as.Request.LeaseGrant.ID,
+ )
+ case as.Request.LeaseRevoke != nil:
+ return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+ as.Request.Header.String(),
+ as.Request.LeaseRevoke.ID,
+ )
+ case as.Request.Authenticate != nil:
+ return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+ as.Request.Header.String(),
+ as.Request.Authenticate.Name,
+ as.Request.Authenticate.SimpleToken,
+ )
+ case as.Request.AuthUserAdd != nil:
+ return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+ as.Request.Header.String(),
+ as.Request.AuthUserAdd.Name,
+ )
+ case as.Request.AuthUserChangePassword != nil:
+ return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+ as.Request.Header.String(),
+ as.Request.AuthUserChangePassword.Name,
+ )
+ case as.Request.Put != nil:
+ return fmt.Sprintf("header:<%s> put:<%s>",
+ as.Request.Header.String(),
+ NewLoggablePutRequest(as.Request.Put).String(),
+ )
+ case as.Request.Txn != nil:
+ return fmt.Sprintf("header:<%s> txn:<%s>",
+ as.Request.Header.String(),
+ NewLoggableTxnRequest(as.Request.Txn).String(),
+ )
+ default:
+ // nothing to redact
+ }
+ return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+ Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+ return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+ var compare []string
+ for _, c := range as.Request.Compare {
+ switch cv := c.TargetUnion.(type) {
+ case *Compare_Value:
+ compare = append(compare, newLoggableValueCompare(c, cv).String())
+ default:
+ // nothing to redact
+ compare = append(compare, c.String())
+ }
+ }
+ var success []string
+ for _, s := range as.Request.Success {
+ success = append(success, newLoggableRequestOp(s).String())
+ }
+ var failure []string
+ for _, f := range as.Request.Failure {
+ failure = append(failure, newLoggableRequestOp(f).String())
+ }
+ return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+ strings.Join(compare, " "),
+ strings.Join(success, " "),
+ strings.Join(failure, " "),
+ )
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+ Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+ return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+ switch op := as.Op.Request.(type) {
+ case *RequestOp_RequestPut:
+ return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+ case *RequestOp_RequestTxn:
+ return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+ default:
+ // nothing to redact
+ }
+ return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+ Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3"`
+ ValueSize int `protobuf:"bytes,7,opt,name=value_size,proto3"`
+ RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+ return &loggableValueCompare{
+ c.Result,
+ c.Target,
+ c.Key,
+ len(cv.Value),
+ c.RangeEnd,
+ }
+}
+
+func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage() {}
+
+// loggablePutRequest implements a custom proto String to replace value bytes field with a value
+// size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+ ValueSize int `protobuf:"varint,2,opt,name=value_size,proto3"`
+ Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"`
+ PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+ IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
+func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+ return &loggablePutRequest{
+ request.Key,
+ len(request.Value),
+ request.Lease,
+ request.PrevKv,
+ request.IgnoreValue,
+ request.IgnoreLease,
+ }
+}
+
+func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage() {}
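The stringer types above let raft request logging avoid embedding user payloads and passwords; a short usage sketch, again assuming the vendored import path:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/etcdserver/etcdserverpb" // import path assumed from this vendor tree
)

func main() {
	req := &pb.InternalRaftRequest{
		Header: &pb.RequestHeader{ID: 1, Username: "operator"},
		Put:    &pb.PutRequest{Key: []byte("service/voltha"), Value: make([]byte, 1<<20)},
	}
	// req.String() would dump the entire 1 MiB value into the log line; the wrapper
	// prints roughly: header:<ID:1 username:"operator" > put:<key:"service/voltha" value_size:1048576 >
	fmt.Println(&pb.InternalRaftStringer{Request: req})
}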
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go
new file mode 100644
index 0000000..3e15079
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go
@@ -0,0 +1,19549 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package etcdserverpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
+
+ authpb "go.etcd.io/etcd/auth/authpb"
+
+ context "golang.org/x/net/context"
+
+ grpc "google.golang.org/grpc"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AlarmType int32
+
+const (
+ AlarmType_NONE AlarmType = 0
+ AlarmType_NOSPACE AlarmType = 1
+ AlarmType_CORRUPT AlarmType = 2
+)
+
+var AlarmType_name = map[int32]string{
+ 0: "NONE",
+ 1: "NOSPACE",
+ 2: "CORRUPT",
+}
+var AlarmType_value = map[string]int32{
+ "NONE": 0,
+ "NOSPACE": 1,
+ "CORRUPT": 2,
+}
+
+func (x AlarmType) String() string {
+ return proto.EnumName(AlarmType_name, int32(x))
+}
+func (AlarmType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
+
+type RangeRequest_SortOrder int32
+
+const (
+ RangeRequest_NONE RangeRequest_SortOrder = 0
+ RangeRequest_ASCEND RangeRequest_SortOrder = 1
+ RangeRequest_DESCEND RangeRequest_SortOrder = 2
+)
+
+var RangeRequest_SortOrder_name = map[int32]string{
+ 0: "NONE",
+ 1: "ASCEND",
+ 2: "DESCEND",
+}
+var RangeRequest_SortOrder_value = map[string]int32{
+ "NONE": 0,
+ "ASCEND": 1,
+ "DESCEND": 2,
+}
+
+func (x RangeRequest_SortOrder) String() string {
+ return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
+}
+func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 0} }
+
+type RangeRequest_SortTarget int32
+
+const (
+ RangeRequest_KEY RangeRequest_SortTarget = 0
+ RangeRequest_VERSION RangeRequest_SortTarget = 1
+ RangeRequest_CREATE RangeRequest_SortTarget = 2
+ RangeRequest_MOD RangeRequest_SortTarget = 3
+ RangeRequest_VALUE RangeRequest_SortTarget = 4
+)
+
+var RangeRequest_SortTarget_name = map[int32]string{
+ 0: "KEY",
+ 1: "VERSION",
+ 2: "CREATE",
+ 3: "MOD",
+ 4: "VALUE",
+}
+var RangeRequest_SortTarget_value = map[string]int32{
+ "KEY": 0,
+ "VERSION": 1,
+ "CREATE": 2,
+ "MOD": 3,
+ "VALUE": 4,
+}
+
+func (x RangeRequest_SortTarget) String() string {
+ return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
+}
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} }
+
+type Compare_CompareResult int32
+
+const (
+ Compare_EQUAL Compare_CompareResult = 0
+ Compare_GREATER Compare_CompareResult = 1
+ Compare_LESS Compare_CompareResult = 2
+ Compare_NOT_EQUAL Compare_CompareResult = 3
+)
+
+var Compare_CompareResult_name = map[int32]string{
+ 0: "EQUAL",
+ 1: "GREATER",
+ 2: "LESS",
+ 3: "NOT_EQUAL",
+}
+var Compare_CompareResult_value = map[string]int32{
+ "EQUAL": 0,
+ "GREATER": 1,
+ "LESS": 2,
+ "NOT_EQUAL": 3,
+}
+
+func (x Compare_CompareResult) String() string {
+ return proto.EnumName(Compare_CompareResult_name, int32(x))
+}
+func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 0} }
+
+type Compare_CompareTarget int32
+
+const (
+ Compare_VERSION Compare_CompareTarget = 0
+ Compare_CREATE Compare_CompareTarget = 1
+ Compare_MOD Compare_CompareTarget = 2
+ Compare_VALUE Compare_CompareTarget = 3
+ Compare_LEASE Compare_CompareTarget = 4
+)
+
+var Compare_CompareTarget_name = map[int32]string{
+ 0: "VERSION",
+ 1: "CREATE",
+ 2: "MOD",
+ 3: "VALUE",
+ 4: "LEASE",
+}
+var Compare_CompareTarget_value = map[string]int32{
+ "VERSION": 0,
+ "CREATE": 1,
+ "MOD": 2,
+ "VALUE": 3,
+ "LEASE": 4,
+}
+
+func (x Compare_CompareTarget) String() string {
+ return proto.EnumName(Compare_CompareTarget_name, int32(x))
+}
+func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9, 1} }
+
+type WatchCreateRequest_FilterType int32
+
+const (
+ // filter out put event.
+ WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+ // filter out delete event.
+ WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+ 0: "NOPUT",
+ 1: "NODELETE",
+}
+var WatchCreateRequest_FilterType_value = map[string]int32{
+ "NOPUT": 0,
+ "NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+ return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+ AlarmRequest_GET AlarmRequest_AlarmAction = 0
+ AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1
+ AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+ 0: "GET",
+ 1: "ACTIVATE",
+ 2: "DEACTIVATE",
+}
+var AlarmRequest_AlarmAction_value = map[string]int32{
+ "GET": 0,
+ "ACTIVATE": 1,
+ "DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+ return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{52, 0}
+}
+
+type ResponseHeader struct {
+ // cluster_id is the ID of the cluster which sent the response.
+ ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ // member_id is the ID of the member which sent the response.
+ MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+ // revision is the key-value store revision when the request was applied.
+ // For watch progress responses, the header.revision indicates progress. All future events
+ // received in this stream are guaranteed to have a higher revision number than the
+ // header.revision number.
+ Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+ // raft_term is the raft term when the request was applied.
+ RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
+}
+
+func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
+func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
+func (*ResponseHeader) ProtoMessage() {}
+func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
+
+func (m *ResponseHeader) GetClusterId() uint64 {
+ if m != nil {
+ return m.ClusterId
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetMemberId() uint64 {
+ if m != nil {
+ return m.MemberId
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetRaftTerm() uint64 {
+ if m != nil {
+ return m.RaftTerm
+ }
+ return 0
+}
+
+type RangeRequest struct {
+ // key is the first key for the range. If range_end is not given, the request only looks up key.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the upper bound on the requested range [key, range_end).
+ // If range_end is '\0', the range is all keys >= key.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ // then the range request gets all keys prefixed with key.
+ // If both key and range_end are '\0', then the range request returns all keys.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+ // it is treated as no limit.
+ Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ // revision is the point-in-time of the key-value store to use for the range.
+ // If revision is less than or equal to zero, the range is over the newest key-value store.
+ // If the revision has been compacted, ErrCompacted is returned as a response.
+ Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+ // sort_order is the order for returned sorted results.
+ SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+ // sort_target is the key-value field to use for sorting.
+ SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+ // serializable sets the range request to use serializable member-local reads.
+ // Range requests are linearizable by default; linearizable requests have higher
+ // latency and lower throughput than serializable requests but reflect the current
+ // consensus of the cluster. For better performance, in exchange for possible stale reads,
+ // a serializable range request is served locally without needing to reach consensus
+ // with other nodes in the cluster.
+ Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+ // keys_only when set returns only the keys and not the values.
+ KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+ // count_only when set returns only the count of the keys in the range.
+ CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+ // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+ // lesser mod revisions will be filtered away.
+ MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+ // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+ // greater mod revisions will be filtered away.
+ MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+ // min_create_revision is the lower bound for returned key create revisions; all keys with
+ // lesser create revisions will be filtered away.
+ MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+ // max_create_revision is the upper bound for returned key create revisions; all keys with
+ // greater create revisions will be filtered away.
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
+}
+
+func (m *RangeRequest) Reset() { *m = RangeRequest{} }
+func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
+func (*RangeRequest) ProtoMessage() {}
+func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
+
+func (m *RangeRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *RangeRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *RangeRequest) GetLimit() int64 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
+ if m != nil {
+ return m.SortOrder
+ }
+ return RangeRequest_NONE
+}
+
+func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
+ if m != nil {
+ return m.SortTarget
+ }
+ return RangeRequest_KEY
+}
+
+func (m *RangeRequest) GetSerializable() bool {
+ if m != nil {
+ return m.Serializable
+ }
+ return false
+}
+
+func (m *RangeRequest) GetKeysOnly() bool {
+ if m != nil {
+ return m.KeysOnly
+ }
+ return false
+}
+
+func (m *RangeRequest) GetCountOnly() bool {
+ if m != nil {
+ return m.CountOnly
+ }
+ return false
+}
+
+func (m *RangeRequest) GetMinModRevision() int64 {
+ if m != nil {
+ return m.MinModRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMaxModRevision() int64 {
+ if m != nil {
+ return m.MaxModRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMinCreateRevision() int64 {
+ if m != nil {
+ return m.MinCreateRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMaxCreateRevision() int64 {
+ if m != nil {
+ return m.MaxCreateRevision
+ }
+ return 0
+}
+
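+// Illustrative sketch (editor-added, not generated): the field comments above describe how
+// range_end selects a prefix range ("key plus one"). The helper below computes that
+// range_end for an arbitrary prefix, handling trailing 0xff bytes, and builds a
+// RangeRequest that lists up to `limit` keys under the prefix in ascending key order.
+func examplePrefixRangeRequest(prefix []byte, limit int64) *RangeRequest {
+	// Compute "prefix plus one": increment the last byte that is not 0xff and
+	// truncate after it (e.g. "aa" -> "ab", "a\xff" -> "b").
+	end := []byte{0} // '\0' means "all keys >= key" when no larger key exists.
+	for i := len(prefix) - 1; i >= 0; i-- {
+		if prefix[i] < 0xff {
+			end = append(append([]byte{}, prefix[:i]...), prefix[i]+1)
+			break
+		}
+	}
+	return &RangeRequest{
+		Key:       prefix,
+		RangeEnd:  end,
+		Limit:     limit,
+		SortOrder: RangeRequest_ASCEND,
+	}
+}
+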
+type RangeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // kvs is the list of key-value pairs matched by the range request.
+ // kvs is empty when count is requested.
+ Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs" json:"kvs,omitempty"`
+ // more indicates if there are more keys to return in the requested range.
+ More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
+ // count is set to the number of keys within the range when requested.
+ Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+}
+
+func (m *RangeResponse) Reset() { *m = RangeResponse{} }
+func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
+func (*RangeResponse) ProtoMessage() {}
+func (*RangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
+
+func (m *RangeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
+ if m != nil {
+ return m.Kvs
+ }
+ return nil
+}
+
+func (m *RangeResponse) GetMore() bool {
+ if m != nil {
+ return m.More
+ }
+ return false
+}
+
+func (m *RangeResponse) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+type PutRequest struct {
+ // key is the key, in bytes, to put into the key-value store.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // value is the value, in bytes, to associate with the key in the key-value store.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // lease is the lease ID to associate with the key in the key-value store. A lease
+ // value of 0 indicates no lease.
+ Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
+ // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+ // The previous key-value pair will be returned in the put response.
+ PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ // If ignore_value is set, etcd updates the key using its current value.
+ // Returns an error if the key does not exist.
+ IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
+ // If ignore_lease is set, etcd updates the key using its current lease.
+ // Returns an error if the key does not exist.
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} }
+
+func (m *PutRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *PutRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+func (m *PutRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+func (m *PutRequest) GetIgnoreValue() bool {
+ if m != nil {
+ return m.IgnoreValue
+ }
+ return false
+}
+
+func (m *PutRequest) GetIgnoreLease() bool {
+ if m != nil {
+ return m.IgnoreLease
+ }
+ return false
+}
+
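+// Illustrative sketch (editor-added, not generated): a minimal PutRequest that attaches
+// the value to an existing lease and asks for the previous key-value pair back, as
+// described by the lease and prev_kv field comments above. The lease ID is a placeholder.
+func examplePutRequest(key, value []byte, leaseID int64) *PutRequest {
+	return &PutRequest{
+		Key:    key,
+		Value:  value,
+		Lease:  leaseID, // 0 would mean "no lease"
+		PrevKv: true,    // return the prior key-value pair in the PutResponse
+	}
+}
+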
+type PutResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // if prev_kv is set in the request, the previous key-value pair will be returned.
+ PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+func (*PutResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} }
+
+func (m *PutResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
+ if m != nil {
+ return m.PrevKv
+ }
+ return nil
+}
+
+type DeleteRangeRequest struct {
+ // key is the first key to delete in the range.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the key following the last key to delete for the range [key, range_end).
+ // If range_end is not given, the range is defined to contain only the key argument.
+ // If range_end is the given key plus one (e.g., "aa"+1 == "ab"), then the range is all
+ // the keys with the prefix (the given key).
+ // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+ // The previous key-value pairs will be returned in the delete response.
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+}
+
+func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} }
+func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeRequest) ProtoMessage() {}
+func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} }
+
+func (m *DeleteRangeRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRangeRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *DeleteRangeRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+type DeleteRangeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // deleted is the number of keys deleted by the delete range request.
+ Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
+ // if prev_kv is set in the request, the previous key-value pairs will be returned.
+ PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs" json:"prev_kvs,omitempty"`
+}
+
+func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} }
+func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeResponse) ProtoMessage() {}
+func (*DeleteRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{6} }
+
+func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRangeResponse) GetDeleted() int64 {
+ if m != nil {
+ return m.Deleted
+ }
+ return 0
+}
+
+func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
+ if m != nil {
+ return m.PrevKvs
+ }
+ return nil
+}
+
+type RequestOp struct {
+ // request is a union of request types accepted by a transaction.
+ //
+ // Types that are valid to be assigned to Request:
+ // *RequestOp_RequestRange
+ // *RequestOp_RequestPut
+ // *RequestOp_RequestDeleteRange
+ // *RequestOp_RequestTxn
+ Request isRequestOp_Request `protobuf_oneof:"request"`
+}
+
+func (m *RequestOp) Reset() { *m = RequestOp{} }
+func (m *RequestOp) String() string { return proto.CompactTextString(m) }
+func (*RequestOp) ProtoMessage() {}
+func (*RequestOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} }
+
+type isRequestOp_Request interface {
+ isRequestOp_Request()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type RequestOp_RequestRange struct {
+ RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,oneof"`
+}
+type RequestOp_RequestPut struct {
+ RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,oneof"`
+}
+type RequestOp_RequestDeleteRange struct {
+ RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"`
+}
+type RequestOp_RequestTxn struct {
+ RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"`
+}
+
+func (*RequestOp_RequestRange) isRequestOp_Request() {}
+func (*RequestOp_RequestPut) isRequestOp_Request() {}
+func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
+func (*RequestOp_RequestTxn) isRequestOp_Request() {}
+
+func (m *RequestOp) GetRequest() isRequestOp_Request {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestRange() *RangeRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
+ return x.RequestRange
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestPut() *PutRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
+ return x.RequestPut
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
+ return x.RequestDeleteRange
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestTxn() *TxnRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
+ return x.RequestTxn
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
+ (*RequestOp_RequestRange)(nil),
+ (*RequestOp_RequestPut)(nil),
+ (*RequestOp_RequestDeleteRange)(nil),
+ (*RequestOp_RequestTxn)(nil),
+ }
+}
+
+func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*RequestOp)
+ // request
+ switch x := m.Request.(type) {
+ case *RequestOp_RequestRange:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestRange); err != nil {
+ return err
+ }
+ case *RequestOp_RequestPut:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestPut); err != nil {
+ return err
+ }
+ case *RequestOp_RequestDeleteRange:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
+ return err
+ }
+ case *RequestOp_RequestTxn:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.RequestTxn); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*RequestOp)
+ switch tag {
+ case 1: // request.request_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RangeRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestRange{msg}
+ return true, err
+ case 2: // request.request_put
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(PutRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestPut{msg}
+ return true, err
+ case 3: // request.request_delete_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DeleteRangeRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestDeleteRange{msg}
+ return true, err
+ case 4: // request.request_txn
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TxnRequest)
+ err := b.DecodeMessage(msg)
+ m.Request = &RequestOp_RequestTxn{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _RequestOp_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*RequestOp)
+ // request
+ switch x := m.Request.(type) {
+ case *RequestOp_RequestRange:
+ s := proto.Size(x.RequestRange)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *RequestOp_RequestPut:
+ s := proto.Size(x.RequestPut)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *RequestOp_RequestDeleteRange:
+ s := proto.Size(x.RequestDeleteRange)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *RequestOp_RequestTxn:
+ s := proto.Size(x.RequestTxn)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type ResponseOp struct {
+ // response is a union of response types returned by a transaction.
+ //
+ // Types that are valid to be assigned to Response:
+ // *ResponseOp_ResponseRange
+ // *ResponseOp_ResponsePut
+ // *ResponseOp_ResponseDeleteRange
+ // *ResponseOp_ResponseTxn
+ Response isResponseOp_Response `protobuf_oneof:"response"`
+}
+
+func (m *ResponseOp) Reset() { *m = ResponseOp{} }
+func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
+func (*ResponseOp) ProtoMessage() {}
+func (*ResponseOp) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{8} }
+
+type isResponseOp_Response interface {
+ isResponseOp_Response()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type ResponseOp_ResponseRange struct {
+ ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,oneof"`
+}
+type ResponseOp_ResponsePut struct {
+ ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,oneof"`
+}
+type ResponseOp_ResponseDeleteRange struct {
+ ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"`
+}
+type ResponseOp_ResponseTxn struct {
+ ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"`
+}
+
+func (*ResponseOp_ResponseRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponsePut) isResponseOp_Response() {}
+func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponseTxn) isResponseOp_Response() {}
+
+func (m *ResponseOp) GetResponse() isResponseOp_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseRange() *RangeResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
+ return x.ResponseRange
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponsePut() *PutResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
+ return x.ResponsePut
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
+ return x.ResponseDeleteRange
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseTxn() *TxnResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
+ return x.ResponseTxn
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
+ (*ResponseOp_ResponseRange)(nil),
+ (*ResponseOp_ResponsePut)(nil),
+ (*ResponseOp_ResponseDeleteRange)(nil),
+ (*ResponseOp_ResponseTxn)(nil),
+ }
+}
+
+func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ResponseOp)
+ // response
+ switch x := m.Response.(type) {
+ case *ResponseOp_ResponseRange:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponseRange); err != nil {
+ return err
+ }
+ case *ResponseOp_ResponsePut:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponsePut); err != nil {
+ return err
+ }
+ case *ResponseOp_ResponseDeleteRange:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
+ return err
+ }
+ case *ResponseOp_ResponseTxn:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ResponseTxn); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ResponseOp)
+ switch tag {
+ case 1: // response.response_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RangeResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponseRange{msg}
+ return true, err
+ case 2: // response.response_put
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(PutResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponsePut{msg}
+ return true, err
+ case 3: // response.response_delete_range
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(DeleteRangeResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponseDeleteRange{msg}
+ return true, err
+ case 4: // response.response_txn
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(TxnResponse)
+ err := b.DecodeMessage(msg)
+ m.Response = &ResponseOp_ResponseTxn{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ResponseOp)
+ // response
+ switch x := m.Response.(type) {
+ case *ResponseOp_ResponseRange:
+ s := proto.Size(x.ResponseRange)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ResponseOp_ResponsePut:
+ s := proto.Size(x.ResponsePut)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ResponseOp_ResponseDeleteRange:
+ s := proto.Size(x.ResponseDeleteRange)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ResponseOp_ResponseTxn:
+ s := proto.Size(x.ResponseTxn)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type Compare struct {
+ // result is logical comparison operation for this comparison.
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
+ // target is the key-value field to inspect for the comparison.
+ Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
+ // key is the subject key for the comparison operation.
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Types that are valid to be assigned to TargetUnion:
+ // *Compare_Version
+ // *Compare_CreateRevision
+ // *Compare_ModRevision
+ // *Compare_Value
+ // *Compare_Lease
+ TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
+ // range_end compares the given target to all keys in the range [key, range_end).
+ // See RangeRequest for more details on key ranges.
+ RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *Compare) Reset() { *m = Compare{} }
+func (m *Compare) String() string { return proto.CompactTextString(m) }
+func (*Compare) ProtoMessage() {}
+func (*Compare) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{9} }
+
+type isCompare_TargetUnion interface {
+ isCompare_TargetUnion()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Compare_Version struct {
+ Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
+}
+type Compare_CreateRevision struct {
+ CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
+}
+type Compare_ModRevision struct {
+ ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
+}
+type Compare_Value struct {
+ Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
+}
+type Compare_Lease struct {
+ Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
+}
+
+func (*Compare_Version) isCompare_TargetUnion() {}
+func (*Compare_CreateRevision) isCompare_TargetUnion() {}
+func (*Compare_ModRevision) isCompare_TargetUnion() {}
+func (*Compare_Value) isCompare_TargetUnion() {}
+func (*Compare_Lease) isCompare_TargetUnion() {}
+
+func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
+ if m != nil {
+ return m.TargetUnion
+ }
+ return nil
+}
+
+func (m *Compare) GetResult() Compare_CompareResult {
+ if m != nil {
+ return m.Result
+ }
+ return Compare_EQUAL
+}
+
+func (m *Compare) GetTarget() Compare_CompareTarget {
+ if m != nil {
+ return m.Target
+ }
+ return Compare_VERSION
+}
+
+func (m *Compare) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Compare) GetVersion() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
+ return x.Version
+ }
+ return 0
+}
+
+func (m *Compare) GetCreateRevision() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
+ return x.CreateRevision
+ }
+ return 0
+}
+
+func (m *Compare) GetModRevision() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
+ return x.ModRevision
+ }
+ return 0
+}
+
+func (m *Compare) GetValue() []byte {
+ if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (m *Compare) GetLease() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
+ return x.Lease
+ }
+ return 0
+}
+
+func (m *Compare) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
+ (*Compare_Version)(nil),
+ (*Compare_CreateRevision)(nil),
+ (*Compare_ModRevision)(nil),
+ (*Compare_Value)(nil),
+ (*Compare_Lease)(nil),
+ }
+}
+
+func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Compare)
+ // target_union
+ switch x := m.TargetUnion.(type) {
+ case *Compare_Version:
+ _ = b.EncodeVarint(4<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.Version))
+ case *Compare_CreateRevision:
+ _ = b.EncodeVarint(5<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.CreateRevision))
+ case *Compare_ModRevision:
+ _ = b.EncodeVarint(6<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.ModRevision))
+ case *Compare_Value:
+ _ = b.EncodeVarint(7<<3 | proto.WireBytes)
+ _ = b.EncodeRawBytes(x.Value)
+ case *Compare_Lease:
+ _ = b.EncodeVarint(8<<3 | proto.WireVarint)
+ _ = b.EncodeVarint(uint64(x.Lease))
+ case nil:
+ default:
+ return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Compare)
+ switch tag {
+ case 4: // target_union.version
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_Version{int64(x)}
+ return true, err
+ case 5: // target_union.create_revision
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_CreateRevision{int64(x)}
+ return true, err
+ case 6: // target_union.mod_revision
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_ModRevision{int64(x)}
+ return true, err
+ case 7: // target_union.value
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeRawBytes(true)
+ m.TargetUnion = &Compare_Value{x}
+ return true, err
+ case 8: // target_union.lease
+ if wire != proto.WireVarint {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeVarint()
+ m.TargetUnion = &Compare_Lease{int64(x)}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Compare_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Compare)
+ // target_union
+ switch x := m.TargetUnion.(type) {
+ case *Compare_Version:
+ n += proto.SizeVarint(4<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Version))
+ case *Compare_CreateRevision:
+ n += proto.SizeVarint(5<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.CreateRevision))
+ case *Compare_ModRevision:
+ n += proto.SizeVarint(6<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.ModRevision))
+ case *Compare_Value:
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Value)))
+ n += len(x.Value)
+ case *Compare_Lease:
+ n += proto.SizeVarint(8<<3 | proto.WireVarint)
+ n += proto.SizeVarint(uint64(x.Lease))
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed
+// if guard evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+type TxnRequest struct {
+ // compare is a list of predicates representing a conjunction of terms.
+ // If the comparisons succeed, then the success requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ // If the comparisons fail, then the failure requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ Compare []*Compare `protobuf:"bytes,1,rep,name=compare" json:"compare,omitempty"`
+ // success is a list of requests which will be applied when compare evaluates to true.
+ Success []*RequestOp `protobuf:"bytes,2,rep,name=success" json:"success,omitempty"`
+ // failure is a list of requests which will be applied when compare evaluates to false.
+ Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure" json:"failure,omitempty"`
+}
+
+func (m *TxnRequest) Reset() { *m = TxnRequest{} }
+func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage() {}
+func (*TxnRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{10} }
+
+func (m *TxnRequest) GetCompare() []*Compare {
+ if m != nil {
+ return m.Compare
+ }
+ return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+ if m != nil {
+ return m.Success
+ }
+ return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+ if m != nil {
+ return m.Failure
+ }
+ return nil
+}
+
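+// Illustrative sketch (editor-added, not generated): the MultiOp comment above maps onto
+// TxnRequest as guard -> Compare, t_op -> Success and f_op -> Failure. The transaction
+// below writes `value` under `key` only if the key has never been created
+// (create_revision == 0); otherwise it reads the existing entry instead.
+func examplePutIfAbsentTxn(key, value []byte) *TxnRequest {
+	return &TxnRequest{
+		Compare: []*Compare{{
+			Result:      Compare_EQUAL,
+			Target:      Compare_CREATE,
+			Key:         key,
+			TargetUnion: &Compare_CreateRevision{CreateRevision: 0},
+		}},
+		Success: []*RequestOp{{
+			Request: &RequestOp_RequestPut{RequestPut: &PutRequest{Key: key, Value: value}},
+		}},
+		Failure: []*RequestOp{{
+			Request: &RequestOp_RequestRange{RequestRange: &RangeRequest{Key: key}},
+		}},
+	}
+}
+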
+type TxnResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // succeeded is set to true if the compare evaluated to true, and false otherwise.
+ Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+ // responses is a list of responses corresponding to the results from applying
+ // success if succeeded is true or failure if succeeded is false.
+ Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses" json:"responses,omitempty"`
+}
+
+func (m *TxnResponse) Reset() { *m = TxnResponse{} }
+func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage() {}
+func (*TxnResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{11} }
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+ if m != nil {
+ return m.Succeeded
+ }
+ return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+ if m != nil {
+ return m.Responses
+ }
+ return nil
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+ // revision is the key-value store revision for the compaction operation.
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+ // physical is set so the RPC will wait until the compaction is physically
+ // applied to the local database such that compacted entries are totally
+ // removed from the backend database.
+ Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
+}
+
+func (m *CompactionRequest) Reset() { *m = CompactionRequest{} }
+func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
+func (*CompactionRequest) ProtoMessage() {}
+func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} }
+
+func (m *CompactionRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *CompactionRequest) GetPhysical() bool {
+ if m != nil {
+ return m.Physical
+ }
+ return false
+}
+
+type CompactionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *CompactionResponse) Reset() { *m = CompactionResponse{} }
+func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
+func (*CompactionResponse) ProtoMessage() {}
+func (*CompactionResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{13} }
+
+func (m *CompactionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type HashRequest struct {
+}
+
+func (m *HashRequest) Reset() { *m = HashRequest{} }
+func (m *HashRequest) String() string { return proto.CompactTextString(m) }
+func (*HashRequest) ProtoMessage() {}
+func (*HashRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{14} }
+
+type HashKVRequest struct {
+ // revision is the key-value store revision for the hash operation.
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+}
+
+func (m *HashKVRequest) Reset() { *m = HashKVRequest{} }
+func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
+func (*HashKVRequest) ProtoMessage() {}
+func (*HashKVRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{15} }
+
+func (m *HashKVRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+type HashKVResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+ // compact_revision is the compacted revision of the key-value store when the hash begins.
+ CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+}
+
+func (m *HashKVResponse) Reset() { *m = HashKVResponse{} }
+func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
+func (*HashKVResponse) ProtoMessage() {}
+func (*HashKVResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{16} }
+
+func (m *HashKVResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *HashKVResponse) GetHash() uint32 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+func (m *HashKVResponse) GetCompactRevision() int64 {
+ if m != nil {
+ return m.CompactRevision
+ }
+ return 0
+}
+
+type HashResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // hash is the hash value computed from the responding member's KV's backend.
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+}
+
+func (m *HashResponse) Reset() { *m = HashResponse{} }
+func (m *HashResponse) String() string { return proto.CompactTextString(m) }
+func (*HashResponse) ProtoMessage() {}
+func (*HashResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{17} }
+
+func (m *HashResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *HashResponse) GetHash() uint32 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+type SnapshotRequest struct {
+}
+
+func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} }
+func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*SnapshotRequest) ProtoMessage() {}
+func (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{18} }
+
+type SnapshotResponse struct {
+ // header has the current key-value store information. The first header in the snapshot
+ // stream indicates the point in time of the snapshot.
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // remaining_bytes is the number of blob bytes to be sent after this message
+ RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
+ // blob contains the next chunk of the snapshot in the snapshot stream.
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+}
+
+func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} }
+func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*SnapshotResponse) ProtoMessage() {}
+func (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} }
+
+func (m *SnapshotResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *SnapshotResponse) GetRemainingBytes() uint64 {
+ if m != nil {
+ return m.RemainingBytes
+ }
+ return 0
+}
+
+func (m *SnapshotResponse) GetBlob() []byte {
+ if m != nil {
+ return m.Blob
+ }
+ return nil
+}
+
+type WatchRequest struct {
+ // request_union is a request to either create a new watcher or cancel an existing watcher.
+ //
+ // Types that are valid to be assigned to RequestUnion:
+ // *WatchRequest_CreateRequest
+ // *WatchRequest_CancelRequest
+ // *WatchRequest_ProgressRequest
+ RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
+}
+
+func (m *WatchRequest) Reset() { *m = WatchRequest{} }
+func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchRequest) ProtoMessage() {}
+func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} }
+
+type isWatchRequest_RequestUnion interface {
+ isWatchRequest_RequestUnion()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type WatchRequest_CreateRequest struct {
+ CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"`
+}
+type WatchRequest_CancelRequest struct {
+ CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"`
+}
+type WatchRequest_ProgressRequest struct {
+ ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,oneof"`
+}
+
+func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {}
+func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {}
+func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
+
+func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
+ if m != nil {
+ return m.RequestUnion
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
+ return x.CreateRequest
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
+ return x.CancelRequest
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
+ return x.ProgressRequest
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
+ (*WatchRequest_CreateRequest)(nil),
+ (*WatchRequest_CancelRequest)(nil),
+ (*WatchRequest_ProgressRequest)(nil),
+ }
+}
+
+func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*WatchRequest)
+ // request_union
+ switch x := m.RequestUnion.(type) {
+ case *WatchRequest_CreateRequest:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.CreateRequest); err != nil {
+ return err
+ }
+ case *WatchRequest_CancelRequest:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.CancelRequest); err != nil {
+ return err
+ }
+ case *WatchRequest_ProgressRequest:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ProgressRequest); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*WatchRequest)
+ switch tag {
+ case 1: // request_union.create_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(WatchCreateRequest)
+ err := b.DecodeMessage(msg)
+ m.RequestUnion = &WatchRequest_CreateRequest{msg}
+ return true, err
+ case 2: // request_union.cancel_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(WatchCancelRequest)
+ err := b.DecodeMessage(msg)
+ m.RequestUnion = &WatchRequest_CancelRequest{msg}
+ return true, err
+ case 3: // request_union.progress_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(WatchProgressRequest)
+ err := b.DecodeMessage(msg)
+ m.RequestUnion = &WatchRequest_ProgressRequest{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*WatchRequest)
+ // request_union
+ switch x := m.RequestUnion.(type) {
+ case *WatchRequest_CreateRequest:
+ s := proto.Size(x.CreateRequest)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *WatchRequest_CancelRequest:
+ s := proto.Size(x.CancelRequest)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *WatchRequest_ProgressRequest:
+ s := proto.Size(x.ProgressRequest)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type WatchCreateRequest struct {
+ // key is the key to register for watching.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+ // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+ // or equal to the key argument are watched.
+ // If range_end is the given key plus one (e.g., "aa"+1 == "ab"),
+ // then all keys with the prefix (the given key) will be watched.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+ StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+ // filters filter the events on the server side before they are sent back to the watcher.
+ Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+ // If prev_kv is set, the created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ // If watch_id is provided and non-zero, it will be assigned to this watcher.
+ // Since creating a watcher in etcd is not a synchronous operation,
+ // this can be used to ensure that ordering is correct when creating multiple
+ // watchers on the same stream. Creating a watcher with an ID already in
+ // use on the stream will cause an error to be returned.
+ WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // fragment enables splitting large revisions into multiple watch responses.
+ Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
+}
+
+func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage() {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{21} }
+
+func (m *WatchCreateRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+ if m != nil {
+ return m.StartRevision
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+ if m != nil {
+ return m.ProgressNotify
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+ if m != nil {
+ return m.Filters
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetFragment() bool {
+ if m != nil {
+ return m.Fragment
+ }
+ return false
+}
+
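+// Illustrative sketch (editor-added, not generated): a WatchRequest that opens a watcher
+// over every key under a prefix starting from a known revision, asks the server for
+// periodic progress notifications, and filters out delete events, tying together the
+// field comments above. rangeEnd is assumed to be "prefix plus one", computed as for
+// RangeRequest.
+func examplePrefixWatchRequest(prefix, rangeEnd []byte, fromRevision int64) *WatchRequest {
+	return &WatchRequest{
+		RequestUnion: &WatchRequest_CreateRequest{CreateRequest: &WatchCreateRequest{
+			Key:            prefix,
+			RangeEnd:       rangeEnd,
+			StartRevision:  fromRevision,
+			ProgressNotify: true,
+			Filters:        []WatchCreateRequest_FilterType{WatchCreateRequest_NODELETE},
+			PrevKv:         true,
+		}},
+	}
+}
+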
+type WatchCancelRequest struct {
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+}
+
+func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage() {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} }
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+type WatchProgressRequest struct {
+}
+
+func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} }
+func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchProgressRequest) ProtoMessage() {}
+func (*WatchProgressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{23} }
+
+type WatchResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // watch_id is the ID of the watcher that corresponds to the response.
+ WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will be attached with the same watch_id.
+ Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+ // canceled is set to true if the response is for a cancel watch request.
+ // No further events will be sent to the canceled watcher.
+ Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+ // This happens when creating a watcher at a compacted revision or when the watcher cannot
+ // catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ // cancel_reason indicates the reason for canceling the watcher.
+ CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+ // fragment is true if a large watch response was split over multiple responses.
+ Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
+ Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"`
+}
+
+func (m *WatchResponse) Reset() { *m = WatchResponse{} }
+func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
+func (*WatchResponse) ProtoMessage() {}
+func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} }
+
+func (m *WatchResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *WatchResponse) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+func (m *WatchResponse) GetCreated() bool {
+ if m != nil {
+ return m.Created
+ }
+ return false
+}
+
+func (m *WatchResponse) GetCanceled() bool {
+ if m != nil {
+ return m.Canceled
+ }
+ return false
+}
+
+func (m *WatchResponse) GetCompactRevision() int64 {
+ if m != nil {
+ return m.CompactRevision
+ }
+ return 0
+}
+
+func (m *WatchResponse) GetCancelReason() string {
+ if m != nil {
+ return m.CancelReason
+ }
+ return ""
+}
+
+func (m *WatchResponse) GetFragment() bool {
+ if m != nil {
+ return m.Fragment
+ }
+ return false
+}
+
+func (m *WatchResponse) GetEvents() []*mvccpb.Event {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
+
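+// Illustrative sketch (editor-added, not generated): how a client loop might interpret a
+// WatchResponse according to the field comments above. It returns the revision to resume
+// from and whether the watcher is still usable; a non-zero compact_revision means the
+// requested start revision was compacted and must not be retried as-is.
+func exampleHandleWatchResponse(resp *WatchResponse) (resumeRev int64, alive bool) {
+	if resp.GetCanceled() {
+		// No further events will arrive; compact_revision (if set) is the oldest
+		// revision still available on the server.
+		if cr := resp.GetCompactRevision(); cr != 0 {
+			return cr, false
+		}
+		return 0, false
+	}
+	for _, ev := range resp.GetEvents() {
+		_ = ev // process the mvccpb.Event (PUT or DELETE) here
+	}
+	// Resume after the last revision covered by this response (header.revision).
+	return resp.GetHeader().GetRevision() + 1, true
+}
+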
+type LeaseGrantRequest struct {
+ // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+ TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} }
+func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantRequest) ProtoMessage() {}
+func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{25} }
+
+func (m *LeaseGrantRequest) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseGrantRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseGrantResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // ID is the lease ID for the granted lease.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the server chosen lease time-to-live in seconds.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} }
+func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantResponse) ProtoMessage() {}
+func (*LeaseGrantResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} }
+
+func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseGrantResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseGrantResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseGrantResponse) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
+
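+// NOTE(editor): illustrative sketch, not generated code. It shows the request/response shape
+// described above: the client asks for an advisory TTL (ID 0 lets the lessor pick an ID) and
+// reads back the server-chosen ID and TTL. It assumes the LeaseClient interface generated
+// later in this file; the function name is hypothetical.
+func exampleLeaseGrant(ctx context.Context, lc LeaseClient, seconds int64) (int64, error) {
+ resp, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{TTL: seconds, ID: 0})
+ if err != nil {
+ return 0, err
+ }
+ if resp.GetError() != "" {
+ return 0, fmt.Errorf("lease grant failed: %s", resp.GetError())
+ }
+ fmt.Printf("granted lease %d with TTL %ds\n", resp.GetID(), resp.GetTTL())
+ return resp.GetID(), nil
+}
+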
+type LeaseRevokeRequest struct {
+ // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} }
+func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeRequest) ProtoMessage() {}
+func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} }
+
+func (m *LeaseRevokeRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseRevokeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} }
+func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeResponse) ProtoMessage() {}
+func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} }
+
+func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type LeaseCheckpoint struct {
+ // ID is the lease ID to checkpoint.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // Remaining_TTL is the remaining time until expiry of the lease.
+ Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"`
+}
+
+func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} }
+func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpoint) ProtoMessage() {}
+func (*LeaseCheckpoint) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} }
+
+func (m *LeaseCheckpoint) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseCheckpoint) GetRemaining_TTL() int64 {
+ if m != nil {
+ return m.Remaining_TTL
+ }
+ return 0
+}
+
+type LeaseCheckpointRequest struct {
+ Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"`
+}
+
+func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} }
+func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpointRequest) ProtoMessage() {}
+func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} }
+
+func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint {
+ if m != nil {
+ return m.Checkpoints
+ }
+ return nil
+}
+
+type LeaseCheckpointResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} }
+func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpointResponse) ProtoMessage() {}
+func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} }
+
+func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type LeaseKeepAliveRequest struct {
+ // ID is the lease ID for the lease to keep alive.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} }
+func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveRequest) ProtoMessage() {}
+func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{32} }
+
+func (m *LeaseKeepAliveRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseKeepAliveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // ID is the lease ID from the keep alive request.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the new time-to-live for the lease.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+}
+
+func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} }
+func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveResponse) ProtoMessage() {}
+func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} }
+
+func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseKeepAliveResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseKeepAliveResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+type LeaseTimeToLiveRequest struct {
+ // ID is the lease ID for the lease.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // keys is true to query all the keys attached to this lease.
+ Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
+}
+
+func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} }
+func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveRequest) ProtoMessage() {}
+func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{34} }
+
+func (m *LeaseTimeToLiveRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveRequest) GetKeys() bool {
+ if m != nil {
+ return m.Keys
+ }
+ return false
+}
+
+type LeaseTimeToLiveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // ID is the lease ID from the time-to-live request.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
+ // Keys is the list of keys attached to this lease.
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"`
+}
+
+func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} }
+func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveResponse) ProtoMessage() {}
+func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} }
+
+func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseTimeToLiveResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
+ if m != nil {
+ return m.GrantedTTL
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
+ if m != nil {
+ return m.Keys
+ }
+ return nil
+}
+
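+// NOTE(editor): illustrative sketch, not generated code. It queries the remaining TTL of a
+// lease and the keys attached to it, as documented above. It assumes the LeaseClient
+// interface generated later in this file; the function name is hypothetical.
+func exampleLeaseTimeToLive(ctx context.Context, lc LeaseClient, id int64) error {
+ resp, err := lc.LeaseTimeToLive(ctx, &LeaseTimeToLiveRequest{ID: id, Keys: true})
+ if err != nil {
+ return err
+ }
+ if resp.GetTTL() <= 0 {
+ // A non-positive TTL is treated here as an expired or unknown lease.
+ return fmt.Errorf("lease %d has expired or does not exist", resp.GetID())
+ }
+ fmt.Printf("lease %d: %ds remaining of %ds granted, %d attached keys\n",
+ resp.GetID(), resp.GetTTL(), resp.GetGrantedTTL(), len(resp.GetKeys()))
+ return nil
+}
+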
+type LeaseLeasesRequest struct {
+}
+
+func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} }
+func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesRequest) ProtoMessage() {}
+func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{36} }
+
+type LeaseStatus struct {
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *LeaseStatus) Reset() { *m = LeaseStatus{} }
+func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
+func (*LeaseStatus) ProtoMessage() {}
+func (*LeaseStatus) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{37} }
+
+func (m *LeaseStatus) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseLeasesResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases" json:"leases,omitempty"`
+}
+
+func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} }
+func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesResponse) ProtoMessage() {}
+func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{38} }
+
+func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
+ if m != nil {
+ return m.Leases
+ }
+ return nil
+}
+
+type Member struct {
+ // ID is the member ID for this member.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // peerURLs is the list of URLs the member exposes to the cluster for communication.
+ PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs" json:"peerURLs,omitempty"`
+ // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+ ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs" json:"clientURLs,omitempty"`
+}
+
+func (m *Member) Reset() { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage() {}
+func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{39} }
+
+func (m *Member) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *Member) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Member) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+func (m *Member) GetClientURLs() []string {
+ if m != nil {
+ return m.ClientURLs
+ }
+ return nil
+}
+
+type MemberAddRequest struct {
+ // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+ PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"`
+}
+
+func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} }
+func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberAddRequest) ProtoMessage() {}
+func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{40} }
+
+func (m *MemberAddRequest) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+type MemberAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // member is the member information for the added member.
+ Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"`
+ // members is a list of all members after adding the new member.
+ Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} }
+func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberAddResponse) ProtoMessage() {}
+func (*MemberAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} }
+
+func (m *MemberAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberAddResponse) GetMember() *Member {
+ if m != nil {
+ return m.Member
+ }
+ return nil
+}
+
+func (m *MemberAddResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberRemoveRequest struct {
+ // ID is the member ID of the member to remove.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+}
+
+func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} }
+func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveRequest) ProtoMessage() {}
+func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} }
+
+func (m *MemberRemoveRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type MemberRemoveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // members is a list of all members after removing the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} }
+func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveResponse) ProtoMessage() {}
+func (*MemberRemoveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} }
+
+func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberRemoveResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberUpdateRequest struct {
+ // ID is the member ID of the member to update.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+ PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs" json:"peerURLs,omitempty"`
+}
+
+func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} }
+func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateRequest) ProtoMessage() {}
+func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} }
+
+func (m *MemberUpdateRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *MemberUpdateRequest) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+type MemberUpdateResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // members is a list of all members after updating the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} }
+func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateResponse) ProtoMessage() {}
+func (*MemberUpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} }
+
+func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberUpdateResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberListRequest struct {
+}
+
+func (m *MemberListRequest) Reset() { *m = MemberListRequest{} }
+func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberListRequest) ProtoMessage() {}
+func (*MemberListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} }
+
+type MemberListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // members is a list of all members associated with the cluster.
+ Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
+}
+
+func (m *MemberListResponse) Reset() { *m = MemberListResponse{} }
+func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberListResponse) ProtoMessage() {}
+func (*MemberListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} }
+
+func (m *MemberListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberListResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
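+// NOTE(editor): illustrative sketch, not generated code. It walks a MemberListResponse and
+// uses the convention documented on Member above: a member that has not started reports an
+// empty name and no clientURLs. The function name is hypothetical.
+func exampleReportMembers(resp *MemberListResponse) {
+ for _, m := range resp.GetMembers() {
+ if m.GetName() == "" {
+ fmt.Printf("member %x has not started (peers: %v)\n", m.GetID(), m.GetPeerURLs())
+ continue
+ }
+ fmt.Printf("member %x (%s) serving clients at %v\n", m.GetID(), m.GetName(), m.GetClientURLs())
+ }
+}
+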
+type DefragmentRequest struct {
+}
+
+func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} }
+func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
+func (*DefragmentRequest) ProtoMessage() {}
+func (*DefragmentRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} }
+
+type DefragmentResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} }
+func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
+func (*DefragmentResponse) ProtoMessage() {}
+func (*DefragmentResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} }
+
+func (m *DefragmentResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type MoveLeaderRequest struct {
+ // targetID is the node ID for the new leader.
+ TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
+}
+
+func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} }
+func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderRequest) ProtoMessage() {}
+func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} }
+
+func (m *MoveLeaderRequest) GetTargetID() uint64 {
+ if m != nil {
+ return m.TargetID
+ }
+ return 0
+}
+
+type MoveLeaderResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} }
+func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderResponse) ProtoMessage() {}
+func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} }
+
+func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AlarmRequest struct {
+ // action is the kind of alarm request to issue. The action
+ // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+ // raised alarm.
+ Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
+ // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+ // alarm request covers all members.
+ MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
+ // alarm is the type of alarm to consider for this request.
+ Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+}
+
+func (m *AlarmRequest) Reset() { *m = AlarmRequest{} }
+func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*AlarmRequest) ProtoMessage() {}
+func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} }
+
+func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
+ if m != nil {
+ return m.Action
+ }
+ return AlarmRequest_GET
+}
+
+func (m *AlarmRequest) GetMemberID() uint64 {
+ if m != nil {
+ return m.MemberID
+ }
+ return 0
+}
+
+func (m *AlarmRequest) GetAlarm() AlarmType {
+ if m != nil {
+ return m.Alarm
+ }
+ return AlarmType_NONE
+}
+
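+// NOTE(editor): illustrative sketch, not generated code. It builds the two common alarm
+// requests described above: a GET that covers all members (memberID 0) and a DEACTIVATE for
+// one member's NOSPACE alarm. The function name is hypothetical.
+func exampleAlarmRequests(memberID uint64) (*AlarmRequest, *AlarmRequest) {
+ // memberID 0 means the request covers every member in the cluster.
+ get := &AlarmRequest{Action: AlarmRequest_GET, MemberID: 0, Alarm: AlarmType_NONE}
+ // Deactivate a previously raised NOSPACE alarm on the given member.
+ deactivate := &AlarmRequest{Action: AlarmRequest_DEACTIVATE, MemberID: memberID, Alarm: AlarmType_NOSPACE}
+ return get, deactivate
+}
+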
+type AlarmMember struct {
+ // memberID is the ID of the member associated with the raised alarm.
+ MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
+ // alarm is the type of alarm which has been raised.
+ Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+}
+
+func (m *AlarmMember) Reset() { *m = AlarmMember{} }
+func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
+func (*AlarmMember) ProtoMessage() {}
+func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} }
+
+func (m *AlarmMember) GetMemberID() uint64 {
+ if m != nil {
+ return m.MemberID
+ }
+ return 0
+}
+
+func (m *AlarmMember) GetAlarm() AlarmType {
+ if m != nil {
+ return m.Alarm
+ }
+ return AlarmType_NONE
+}
+
+type AlarmResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // alarms is a list of alarms associated with the alarm request.
+ Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms" json:"alarms,omitempty"`
+}
+
+func (m *AlarmResponse) Reset() { *m = AlarmResponse{} }
+func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
+func (*AlarmResponse) ProtoMessage() {}
+func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} }
+
+func (m *AlarmResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AlarmResponse) GetAlarms() []*AlarmMember {
+ if m != nil {
+ return m.Alarms
+ }
+ return nil
+}
+
+type StatusRequest struct {
+}
+
+func (m *StatusRequest) Reset() { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage() {}
+func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} }
+
+type StatusResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // version is the cluster protocol version used by the responding member.
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // dbSize is the size, in bytes, of the backend database physically allocated by the responding member.
+ DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+ // leader is the member ID which the responding member believes is the current leader.
+ Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+ // raftIndex is the current raft committed index of the responding member.
+ RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+ // raftTerm is the current raft term of the responding member.
+ RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+ // raftAppliedIndex is the current raft applied index of the responding member.
+ RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"`
+ // errors contains alarm/health information and status.
+ Errors []string `protobuf:"bytes,8,rep,name=errors" json:"errors,omitempty"`
+ // dbSizeInUse is the size, in bytes, of the backend database logically in use by the responding member.
+ DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"`
+}
+
+func (m *StatusResponse) Reset() { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage() {}
+func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} }
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+ if m != nil {
+ return m.DbSize
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+ if m != nil {
+ return m.Leader
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+ if m != nil {
+ return m.RaftIndex
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+ if m != nil {
+ return m.RaftTerm
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftAppliedIndex() uint64 {
+ if m != nil {
+ return m.RaftAppliedIndex
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetErrors() []string {
+ if m != nil {
+ return m.Errors
+ }
+ return nil
+}
+
+func (m *StatusResponse) GetDbSizeInUse() int64 {
+ if m != nil {
+ return m.DbSizeInUse
+ }
+ return 0
+}
+
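+// NOTE(editor): illustrative sketch, not generated code. It derives how much of the
+// physically allocated backend space (dbSize) is logically in use (dbSizeInUse), which is the
+// usual signal for deciding whether a member needs defragmentation. The function name is
+// hypothetical.
+func exampleBackendUsage(resp *StatusResponse) float64 {
+ if resp.GetDbSize() == 0 {
+ return 0
+ }
+ inUse := float64(resp.GetDbSizeInUse()) / float64(resp.GetDbSize())
+ fmt.Printf("believed leader %x: %d/%d bytes in use (%.0f%%), raft index %d, term %d\n",
+ resp.GetLeader(), resp.GetDbSizeInUse(), resp.GetDbSize(), inUse*100,
+ resp.GetRaftIndex(), resp.GetRaftTerm())
+ return inUse
+}
+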
+type AuthEnableRequest struct {
+}
+
+func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage() {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} }
+
+type AuthDisableRequest struct {
+}
+
+func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage() {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} }
+
+type AuthenticateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage() {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} }
+
+func (m *AuthenticateRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserAddRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage() {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} }
+
+func (m *AuthUserAddRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserGetRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage() {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} }
+
+func (m *AuthUserGetRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthUserDeleteRequest struct {
+ // name is the name of the user to delete.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage() {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} }
+
+func (m *AuthUserDeleteRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+ // name is the name of the user whose password is being changed.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // password is the new password for the user.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage() {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{63}
+}
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserGrantRoleRequest struct {
+ // user is the name of the user which should be granted a given role.
+ User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+ // role is the name of the role to grant to the user.
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage() {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} }
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+ if m != nil {
+ return m.User
+ }
+ return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage() {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} }
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthRoleAddRequest struct {
+ // name is the name of the role to add to the authentication system.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage() {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} }
+
+func (m *AuthRoleAddRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthRoleGetRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage() {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} }
+
+func (m *AuthRoleGetRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthUserListRequest struct {
+}
+
+func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage() {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} }
+
+type AuthRoleListRequest struct {
+}
+
+func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage() {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} }
+
+type AuthRoleDeleteRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage() {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} }
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+ // name is the name of the role which will be granted the permission.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // perm is the permission to grant to the role.
+ Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm" json:"perm,omitempty"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage() {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{71}
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+ if m != nil {
+ return m.Perm
+ }
+ return nil
+}
+
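+// NOTE(editor): illustrative sketch, not generated code. It builds a grant-permission request
+// for the role and key range described above, using the authpb.Permission type imported by
+// this file. The READWRITE value comes from authpb; the function name is hypothetical.
+func exampleGrantReadWrite(role string, key, rangeEnd []byte) *AuthRoleGrantPermissionRequest {
+ return &AuthRoleGrantPermissionRequest{
+ Name: role,
+ Perm: &authpb.Permission{
+ PermType: authpb.READWRITE,
+ Key: key,
+ RangeEnd: rangeEnd,
+ },
+ }
+}
+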
+type AuthRoleRevokePermissionRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage() {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{72}
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+type AuthEnableResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage() {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} }
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthDisableResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage() {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} }
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthenticateResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // token is an authorized token that can be used in succeeding RPCs.
+ Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+}
+
+func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage() {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} }
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+ if m != nil {
+ return m.Token
+ }
+ return ""
+}
+
+type AuthUserAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage() {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} }
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserGetResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage() {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} }
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+ if m != nil {
+ return m.Roles
+ }
+ return nil
+}
+
+type AuthUserDeleteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage() {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{78} }
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage() {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{79}
+}
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage() {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{80} }
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage() {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{81} }
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage() {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{82} }
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleGetResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm" json:"perm,omitempty"`
+}
+
+func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage() {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{83} }
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+ if m != nil {
+ return m.Perm
+ }
+ return nil
+}
+
+type AuthRoleListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Roles []string `protobuf:"bytes,2,rep,name=roles" json:"roles,omitempty"`
+}
+
+func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage() {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{84} }
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+ if m != nil {
+ return m.Roles
+ }
+ return nil
+}
+
+type AuthUserListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"`
+}
+
+func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage() {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{85} }
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+ if m != nil {
+ return m.Users
+ }
+ return nil
+}
+
+type AuthRoleDeleteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage() {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{86} }
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage() {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{87}
+}
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage() {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{88}
+}
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+ proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+ proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+ proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+ proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+ proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+ proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+ proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+ proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+ proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+ proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+ proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+ proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+ proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+ proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+ proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+ proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+ proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+ proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+ proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+ proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+ proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+ proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+ proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
+ proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+ proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+ proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+ proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+ proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+ proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint")
+ proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest")
+ proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse")
+ proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+ proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+ proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+ proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+ proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+ proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+ proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+ proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+ proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+ proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+ proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+ proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+ proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+ proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+ proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+ proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+ proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+ proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+ proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+ proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+ proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+ proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+ proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+ proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+ proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+ proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+ proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+ proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+ proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+ proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+ proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+ proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+ proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+ proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+ proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+ proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+ proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+ proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+ proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+ proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+ proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+ proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+ proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+ proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+ proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+ proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+ proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+ proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+ proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+ proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+ proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+ proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+ proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+ proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+ proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+ proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+ proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+ proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+ proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+ proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+ proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+ proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+ proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+ proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for KV service
+
+type KVClient interface {
+ // Range gets the keys in the range from the key-value store.
+ Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
+
+type kVClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+ return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+ out := new(RangeResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+ out := new(PutResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+ out := new(DeleteRangeResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+ out := new(TxnResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+ out := new(CompactionResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for KV service
+
+type KVServer interface {
+ // Range gets the keys in the range from the key-value store.
+ Range(context.Context, *RangeRequest) (*RangeResponse, error)
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ Put(context.Context, *PutRequest) (*PutResponse, error)
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+ s.RegisterService(&_KV_serviceDesc, srv)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Range(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Range",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PutRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Put(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Put",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Put(ctx, req.(*PutRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteRangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).DeleteRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/DeleteRange",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TxnRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Txn(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Txn",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CompactionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Compact(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Compact",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _KV_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.KV",
+ HandlerType: (*KVServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Range",
+ Handler: _KV_Range_Handler,
+ },
+ {
+ MethodName: "Put",
+ Handler: _KV_Put_Handler,
+ },
+ {
+ MethodName: "DeleteRange",
+ Handler: _KV_DeleteRange_Handler,
+ },
+ {
+ MethodName: "Txn",
+ Handler: _KV_Txn_Handler,
+ },
+ {
+ MethodName: "Compact",
+ Handler: _KV_Compact_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
+
+// Client API for Watch service
+
+type WatchClient interface {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
+}
+
+type watchClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewWatchClient(cc *grpc.ClientConn) WatchClient {
+ return &watchClient{cc}
+}
+
+func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Watch/Watch", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &watchWatchClient{stream}
+ return x, nil
+}
+
+type Watch_WatchClient interface {
+ Send(*WatchRequest) error
+ Recv() (*WatchResponse, error)
+ grpc.ClientStream
+}
+
+type watchWatchClient struct {
+ grpc.ClientStream
+}
+
+func (x *watchWatchClient) Send(m *WatchRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *watchWatchClient) Recv() (*WatchResponse, error) {
+ m := new(WatchResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// Server API for Watch service
+
+type WatchServer interface {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ Watch(Watch_WatchServer) error
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+ s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(WatchServer).Watch(&watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+ Send(*WatchResponse) error
+ Recv() (*WatchRequest, error)
+ grpc.ServerStream
+}
+
+type watchWatchServer struct {
+ grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *watchWatchServer) Recv() (*WatchRequest, error) {
+ m := new(WatchRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Watch",
+ HandlerType: (*WatchServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Watch",
+ Handler: _Watch_Watch_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
+
+// Client API for Lease service
+
+type LeaseClient interface {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+ // LeaseLeases lists all existing leases.
+ LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+ return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+ out := new(LeaseGrantResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+ out := new(LeaseRevokeResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Lease_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &leaseLeaseKeepAliveClient{stream}
+ return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+ Send(*LeaseKeepAliveRequest) error
+ Recv() (*LeaseKeepAliveResponse, error)
+ grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+ grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+ m := new(LeaseKeepAliveResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+ out := new(LeaseTimeToLiveResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+ out := new(LeaseLeasesResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Lease service
+
+type LeaseServer interface {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+ // LeaseLeases lists all existing leases.
+ LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+ s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseGrantRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseGrant(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseRevokeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseRevoke(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+ Send(*LeaseKeepAliveResponse) error
+ Recv() (*LeaseKeepAliveRequest, error)
+ grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+ grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+ m := new(LeaseKeepAliveRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseTimeToLiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseLeasesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseLeases(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Lease",
+ HandlerType: (*LeaseServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "LeaseGrant",
+ Handler: _Lease_LeaseGrant_Handler,
+ },
+ {
+ MethodName: "LeaseRevoke",
+ Handler: _Lease_LeaseRevoke_Handler,
+ },
+ {
+ MethodName: "LeaseTimeToLive",
+ Handler: _Lease_LeaseTimeToLive_Handler,
+ },
+ {
+ MethodName: "LeaseLeases",
+ Handler: _Lease_LeaseLeases_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "LeaseKeepAlive",
+ Handler: _Lease_LeaseKeepAlive_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
+
+// Client API for Cluster service
+
+type ClusterClient interface {
+ // MemberAdd adds a member into the cluster.
+ MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+ // MemberUpdate updates the member configuration.
+ MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+ // MemberList lists all the members in the cluster.
+ MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+}
+
+type clusterClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+ return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+ out := new(MemberAddResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+ out := new(MemberRemoveResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+ out := new(MemberUpdateResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+ out := new(MemberListResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Cluster service
+
+type ClusterServer interface {
+ // MemberAdd adds a member into the cluster.
+ MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+ // MemberUpdate updates the member configuration.
+ MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+ // MemberList lists all the members in the cluster.
+ MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+ s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberRemoveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberRemove(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberUpdateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberUpdate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Cluster",
+ HandlerType: (*ClusterServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "MemberAdd",
+ Handler: _Cluster_MemberAdd_Handler,
+ },
+ {
+ MethodName: "MemberRemove",
+ Handler: _Cluster_MemberRemove_Handler,
+ },
+ {
+ MethodName: "MemberUpdate",
+ Handler: _Cluster_MemberUpdate_Handler,
+ },
+ {
+ MethodName: "MemberList",
+ Handler: _Cluster_MemberList_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
+
+// Client API for Maintenance service
+
+type MaintenanceClient interface {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+ // Status gets the status of the member.
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+ // Defragment defragments a member's backend database to recover storage space.
+ Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since the Hash operation does not hold MVCC locks.
+ // Use the "HashKV" API instead for "key" bucket consistency checks.
+ Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates the "key" bucket in backend storage.
+ HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+ // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+ MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+}
+
+type maintenanceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+ return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+ out := new(AlarmResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+ out := new(StatusResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+ out := new(DefragmentResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+ out := new(HashResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+ out := new(HashKVResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Maintenance_serviceDesc.Streams[0], c.cc, "/etcdserverpb.Maintenance/Snapshot", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &maintenanceSnapshotClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Maintenance_SnapshotClient interface {
+ Recv() (*SnapshotResponse, error)
+ grpc.ClientStream
+}
+
+type maintenanceSnapshotClient struct {
+ grpc.ClientStream
+}
+
+func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
+ m := new(SnapshotResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
+ out := new(MoveLeaderResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Maintenance service
+
+type MaintenanceServer interface {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
+ // Status gets the status of the member.
+ Status(context.Context, *StatusRequest) (*StatusResponse, error)
+ // Defragment defragments a member's backend database to recover storage space.
+ Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since the Hash operation does not hold MVCC locks.
+ // Use the "HashKV" API instead for "key" bucket consistency checks.
+ Hash(context.Context, *HashRequest) (*HashResponse, error)
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates the "key" bucket in backend storage.
+ HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
+ // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
+}
+
+func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
+ s.RegisterService(&_Maintenance_serviceDesc, srv)
+}
+
+func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AlarmRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Alarm(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Alarm",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Status(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Status",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DefragmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Defragment(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Defragment",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Hash(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Hash",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashKVRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).HashKV(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/HashKV",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SnapshotRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
+}
+
+type Maintenance_SnapshotServer interface {
+ Send(*SnapshotResponse) error
+ grpc.ServerStream
+}
+
+type maintenanceSnapshotServer struct {
+ grpc.ServerStream
+}
+
+func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MoveLeaderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).MoveLeader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Maintenance_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Maintenance",
+ HandlerType: (*MaintenanceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Alarm",
+ Handler: _Maintenance_Alarm_Handler,
+ },
+ {
+ MethodName: "Status",
+ Handler: _Maintenance_Status_Handler,
+ },
+ {
+ MethodName: "Defragment",
+ Handler: _Maintenance_Defragment_Handler,
+ },
+ {
+ MethodName: "Hash",
+ Handler: _Maintenance_Hash_Handler,
+ },
+ {
+ MethodName: "HashKV",
+ Handler: _Maintenance_HashKV_Handler,
+ },
+ {
+ MethodName: "MoveLeader",
+ Handler: _Maintenance_MoveLeader_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Snapshot",
+ Handler: _Maintenance_Snapshot_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
+
+// Client API for Auth service
+
+type AuthClient interface {
+ // AuthEnable enables authentication.
+ AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
+ // AuthDisable disables authentication.
+ AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
+ // Authenticate processes an authenticate request.
+ Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
+ // UserAdd adds a new user.
+ UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
+ // UserGrantRole grants a role to a specified user.
+ UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
+ // UserRevokeRole revokes a role of a specified user.
+ UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role.
+ RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
+ // RoleList gets a list of all roles.
+ RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
+}
+
+type authClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewAuthClient(cc *grpc.ClientConn) AuthClient {
+ return &authClient{cc}
+}
+
+func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
+ out := new(AuthEnableResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
+ out := new(AuthDisableResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
+ out := new(AuthenticateResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
+ out := new(AuthUserAddResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
+ out := new(AuthUserGetResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
+ out := new(AuthUserListResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
+ out := new(AuthUserDeleteResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
+ out := new(AuthUserChangePasswordResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
+ out := new(AuthUserGrantRoleResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
+ out := new(AuthUserRevokeRoleResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
+ out := new(AuthRoleAddResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
+ out := new(AuthRoleGetResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
+ out := new(AuthRoleListResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
+ out := new(AuthRoleDeleteResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
+ out := new(AuthRoleGrantPermissionResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
+ out := new(AuthRoleRevokePermissionResponse)
+ err := grpc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Auth service
+
+type AuthServer interface {
+ // AuthEnable enables authentication.
+ AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
+ // AuthDisable disables authentication.
+ AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
+ // Authenticate processes an authenticate request.
+ Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+ // UserAdd adds a new user.
+ UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+ // UserGrantRole grants a role to a specified user.
+ UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+ // UserRevokeRole revokes a role of a specified user.
+ UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role.
+ RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+ // RoleList gets a list of all roles.
+ RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
+}
+
+func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
+ s.RegisterService(&_Auth_serviceDesc, srv)
+}
+
+func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthEnableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthEnable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthEnable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthDisableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthDisable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthDisable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthenticateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).Authenticate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/Authenticate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserChangePasswordRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserChangePassword(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserChangePassword",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserGrantRoleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserGrantRole(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserGrantRole",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserRevokeRoleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserRevokeRole(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleGrantPermissionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleGrantPermission(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleRevokePermissionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleRevokePermission(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Auth_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Auth",
+ HandlerType: (*AuthServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "AuthEnable",
+ Handler: _Auth_AuthEnable_Handler,
+ },
+ {
+ MethodName: "AuthDisable",
+ Handler: _Auth_AuthDisable_Handler,
+ },
+ {
+ MethodName: "Authenticate",
+ Handler: _Auth_Authenticate_Handler,
+ },
+ {
+ MethodName: "UserAdd",
+ Handler: _Auth_UserAdd_Handler,
+ },
+ {
+ MethodName: "UserGet",
+ Handler: _Auth_UserGet_Handler,
+ },
+ {
+ MethodName: "UserList",
+ Handler: _Auth_UserList_Handler,
+ },
+ {
+ MethodName: "UserDelete",
+ Handler: _Auth_UserDelete_Handler,
+ },
+ {
+ MethodName: "UserChangePassword",
+ Handler: _Auth_UserChangePassword_Handler,
+ },
+ {
+ MethodName: "UserGrantRole",
+ Handler: _Auth_UserGrantRole_Handler,
+ },
+ {
+ MethodName: "UserRevokeRole",
+ Handler: _Auth_UserRevokeRole_Handler,
+ },
+ {
+ MethodName: "RoleAdd",
+ Handler: _Auth_RoleAdd_Handler,
+ },
+ {
+ MethodName: "RoleGet",
+ Handler: _Auth_RoleGet_Handler,
+ },
+ {
+ MethodName: "RoleList",
+ Handler: _Auth_RoleList_Handler,
+ },
+ {
+ MethodName: "RoleDelete",
+ Handler: _Auth_RoleDelete_Handler,
+ },
+ {
+ MethodName: "RoleGrantPermission",
+ Handler: _Auth_RoleGrantPermission_Handler,
+ },
+ {
+ MethodName: "RoleRevokePermission",
+ Handler: _Auth_RoleRevokePermission_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
+
+func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ClusterId != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
+ }
+ if m.MemberId != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
+ }
+ if m.Revision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ if m.RaftTerm != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+ }
+ return i, nil
+}
+
+func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ if m.Limit != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
+ }
+ if m.Revision != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ if m.SortOrder != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
+ }
+ if m.SortTarget != 0 {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
+ }
+ if m.Serializable {
+ dAtA[i] = 0x38
+ i++
+ if m.Serializable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.KeysOnly {
+ dAtA[i] = 0x40
+ i++
+ if m.KeysOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.CountOnly {
+ dAtA[i] = 0x48
+ i++
+ if m.CountOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.MinModRevision != 0 {
+ dAtA[i] = 0x50
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
+ }
+ if m.MaxModRevision != 0 {
+ dAtA[i] = 0x58
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
+ }
+ if m.MinCreateRevision != 0 {
+ dAtA[i] = 0x60
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
+ }
+ if m.MaxCreateRevision != 0 {
+ dAtA[i] = 0x68
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
+ }
+ return i, nil
+}
+
+func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n1, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if len(m.Kvs) > 0 {
+ for _, msg := range m.Kvs {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if m.More {
+ dAtA[i] = 0x18
+ i++
+ if m.More {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.Count != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Count))
+ }
+ return i, nil
+}
+
+func (m *PutRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+ }
+ if m.PrevKv {
+ dAtA[i] = 0x20
+ i++
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.IgnoreValue {
+ dAtA[i] = 0x28
+ i++
+ if m.IgnoreValue {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.IgnoreLease {
+ dAtA[i] = 0x30
+ i++
+ if m.IgnoreLease {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *PutResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n2, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ if m.PrevKv != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.PrevKv.Size()))
+ n3, err := m.PrevKv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ return i, nil
+}
+
+func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ if m.PrevKv {
+ dAtA[i] = 0x18
+ i++
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n4, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ if m.Deleted != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
+ }
+ if len(m.PrevKvs) > 0 {
+ for _, msg := range m.PrevKvs {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *RequestOp) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Request != nil {
+ nn5, err := m.Request.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn5
+ }
+ return i, nil
+}
+
+func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestRange != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestRange.Size()))
+ n6, err := m.RequestRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestPut != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestPut.Size()))
+ n7, err := m.RequestPut.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ return i, nil
+}
+func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestDeleteRange != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestDeleteRange.Size()))
+ n8, err := m.RequestDeleteRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
+func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.RequestTxn != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size()))
+ n9, err := m.RequestTxn.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n9
+ }
+ return i, nil
+}
+func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Response != nil {
+ nn10, err := m.Response.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn10
+ }
+ return i, nil
+}
+
+func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponseRange != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size()))
+ n11, err := m.ResponseRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n11
+ }
+ return i, nil
+}
+func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponsePut != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size()))
+ n12, err := m.ResponsePut.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n12
+ }
+ return i, nil
+}
+func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponseDeleteRange != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size()))
+ n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n13
+ }
+ return i, nil
+}
+func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ResponseTxn != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size()))
+ n14, err := m.ResponseTxn.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n14
+ }
+ return i, nil
+}
+func (m *Compare) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Result != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Result))
+ }
+ if m.Target != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Target))
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if m.TargetUnion != nil {
+ nn15, err := m.TargetUnion.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn15
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x82
+ i++
+ dAtA[i] = 0x4
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ return i, nil
+}
+
+func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Version))
+ return i, nil
+}
+func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
+ return i, nil
+}
+func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
+ return i, nil
+}
+func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.Value != nil {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ return i, nil
+}
+func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+ return i, nil
+}
+func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Compare) > 0 {
+ for _, msg := range m.Compare {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Success) > 0 {
+ for _, msg := range m.Success {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Failure) > 0 {
+ for _, msg := range m.Failure {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n16, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n16
+ }
+ if m.Succeeded {
+ dAtA[i] = 0x10
+ i++
+ if m.Succeeded {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if len(m.Responses) > 0 {
+ for _, msg := range m.Responses {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ if m.Physical {
+ dAtA[i] = 0x10
+ i++
+ if m.Physical {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n17, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n17
+ }
+ return i, nil
+}
+
+func (m *HashRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ }
+ return i, nil
+}
+
+func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n18, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n18
+ }
+ if m.Hash != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+ }
+ if m.CompactRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+ }
+ return i, nil
+}
+
+func (m *HashResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n19, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n19
+ }
+ if m.Hash != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+ }
+ return i, nil
+}
+
+func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n20, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n20
+ }
+ if m.RemainingBytes != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
+ }
+ if len(m.Blob) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
+ i += copy(dAtA[i:], m.Blob)
+ }
+ return i, nil
+}
+
+func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.RequestUnion != nil {
+ nn21, err := m.RequestUnion.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn21
+ }
+ return i, nil
+}
+
+func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.CreateRequest != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size()))
+ n22, err := m.CreateRequest.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n22
+ }
+ return i, nil
+}
+func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.CancelRequest != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size()))
+ n23, err := m.CancelRequest.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n23
+ }
+ return i, nil
+}
+func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.ProgressRequest != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ProgressRequest.Size()))
+ n24, err := m.ProgressRequest.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n24
+ }
+ return i, nil
+}
+func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ if m.StartRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
+ }
+ if m.ProgressNotify {
+ dAtA[i] = 0x20
+ i++
+ if m.ProgressNotify {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if len(m.Filters) > 0 {
+ dAtA26 := make([]byte, len(m.Filters)*10)
+ var j25 int
+ for _, num := range m.Filters {
+ for num >= 1<<7 {
+ dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j25++
+ }
+ dAtA26[j25] = uint8(num)
+ j25++
+ }
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(j25))
+ i += copy(dAtA[i:], dAtA26[:j25])
+ }
+ if m.PrevKv {
+ dAtA[i] = 0x30
+ i++
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.WatchId != 0 {
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ }
+ if m.Fragment {
+ dAtA[i] = 0x40
+ i++
+ if m.Fragment {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.WatchId != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ }
+ return i, nil
+}
+
+func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n27, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n27
+ }
+ if m.WatchId != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ }
+ if m.Created {
+ dAtA[i] = 0x18
+ i++
+ if m.Created {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.Canceled {
+ dAtA[i] = 0x20
+ i++
+ if m.Canceled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.CompactRevision != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+ }
+ if len(m.CancelReason) > 0 {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
+ i += copy(dAtA[i:], m.CancelReason)
+ }
+ if m.Fragment {
+ dAtA[i] = 0x38
+ i++
+ if m.Fragment {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if len(m.Events) > 0 {
+ for _, msg := range m.Events {
+ dAtA[i] = 0x5a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.TTL != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n28, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n28
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ if len(m.Error) > 0 {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
+ i += copy(dAtA[i:], m.Error)
+ }
+ return i, nil
+}
+
+func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n29, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n29
+ }
+ return i, nil
+}
+
+func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.Remaining_TTL != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL))
+ }
+ return i, nil
+}
+
+func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Checkpoints) > 0 {
+ for _, msg := range m.Checkpoints {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n30, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n30
+ }
+ return i, nil
+}
+
+func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n31, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n31
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ return i, nil
+}
+
+func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.Keys {
+ dAtA[i] = 0x10
+ i++
+ if m.Keys {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ return i, nil
+}
+
+func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n32, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n32
+ }
+ if m.ID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ }
+ if m.GrantedTTL != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
+ }
+ if len(m.Keys) > 0 {
+ for _, b := range m.Keys {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(b)))
+ i += copy(dAtA[i:], b)
+ }
+ }
+ return i, nil
+}
+
+func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n33, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n33
+ }
+ if len(m.Leases) > 0 {
+ for _, msg := range m.Leases {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if len(m.Name) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ dAtA[i] = 0x1a
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ if len(m.ClientURLs) > 0 {
+ for _, s := range m.ClientURLs {
+ dAtA[i] = 0x22
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ dAtA[i] = 0xa
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n34, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n34
+ }
+ if m.Member != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size()))
+ n35, err := m.Member.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n35
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ return i, nil
+}
+
+func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n36, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n36
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n37, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n37
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n38, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n38
+ }
+ if len(m.Members) > 0 {
+ for _, msg := range m.Members {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n39, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n39
+ }
+ return i, nil
+}
+
+func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.TargetID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
+ }
+ return i, nil
+}
+
+func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n40, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n40
+ }
+ return i, nil
+}
+
+func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Action != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+ }
+ if m.MemberID != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+ }
+ return i, nil
+}
+
+func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.MemberID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+ }
+ return i, nil
+}
+
+func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n41, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n41
+ }
+ if len(m.Alarms) > 0 {
+ for _, msg := range m.Alarms {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n42, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n42
+ }
+ if len(m.Version) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i += copy(dAtA[i:], m.Version)
+ }
+ if m.DbSize != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
+ }
+ if m.Leader != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
+ }
+ if m.RaftIndex != 0 {
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
+ }
+ if m.RaftTerm != 0 {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+ }
+ if m.RaftAppliedIndex != 0 {
+ dAtA[i] = 0x38
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex))
+ }
+ if len(m.Errors) > 0 {
+ for _, s := range m.Errors {
+ dAtA[i] = 0x42
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ if m.DbSizeInUse != 0 {
+ dAtA[i] = 0x48
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse))
+ }
+ return i, nil
+}
+
+func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ return i, nil
+}
+
+func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ return i, nil
+}
+
+func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Password) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i += copy(dAtA[i:], m.Password)
+ }
+ return i, nil
+}
+
+func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.User) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
+ i += copy(dAtA[i:], m.User)
+ }
+ if len(m.Role) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Role) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Role) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
+func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Role) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if m.Perm != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size()))
+ n43, err := m.Perm.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n43
+ }
+ return i, nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Role) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i += copy(dAtA[i:], m.Role)
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if len(m.RangeEnd) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i += copy(dAtA[i:], m.RangeEnd)
+ }
+ return i, nil
+}
+
+func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n44, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n44
+ }
+ return i, nil
+}
+
+func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n45, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n45
+ }
+ return i, nil
+}
+
+func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n46, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n46
+ }
+ if len(m.Token) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
+ i += copy(dAtA[i:], m.Token)
+ }
+ return i, nil
+}
+
+func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n47, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n47
+ }
+ return i, nil
+}
+
+func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n48, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n48
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n49, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n49
+ }
+ return i, nil
+}
+
+func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n50, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n50
+ }
+ return i, nil
+}
+
+func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n51, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n51
+ }
+ return i, nil
+}
+
+func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n52, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n52
+ }
+ return i, nil
+}
+
+func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n53, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n53
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n54, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n54
+ }
+ if len(m.Perm) > 0 {
+ for _, msg := range m.Perm {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n55, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n55
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n56, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n56
+ }
+ if len(m.Users) > 0 {
+ for _, s := range m.Users {
+ dAtA[i] = 0x12
+ i++
+ l = len(s)
+ for l >= 1<<7 {
+ dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+ l >>= 7
+ i++
+ }
+ dAtA[i] = uint8(l)
+ i++
+ i += copy(dAtA[i:], s)
+ }
+ }
+ return i, nil
+}
+
+func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n57, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n57
+ }
+ return i, nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n58, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n58
+ }
+ return i, nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size()))
+ n59, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n59
+ }
+ return i, nil
+}
+
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *ResponseHeader) Size() (n int) {
+ var l int
+ _ = l
+ if m.ClusterId != 0 {
+ n += 1 + sovRpc(uint64(m.ClusterId))
+ }
+ if m.MemberId != 0 {
+ n += 1 + sovRpc(uint64(m.MemberId))
+ }
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.RaftTerm != 0 {
+ n += 1 + sovRpc(uint64(m.RaftTerm))
+ }
+ return n
+}
+
+func (m *RangeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Limit != 0 {
+ n += 1 + sovRpc(uint64(m.Limit))
+ }
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.SortOrder != 0 {
+ n += 1 + sovRpc(uint64(m.SortOrder))
+ }
+ if m.SortTarget != 0 {
+ n += 1 + sovRpc(uint64(m.SortTarget))
+ }
+ if m.Serializable {
+ n += 2
+ }
+ if m.KeysOnly {
+ n += 2
+ }
+ if m.CountOnly {
+ n += 2
+ }
+ if m.MinModRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MinModRevision))
+ }
+ if m.MaxModRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MaxModRevision))
+ }
+ if m.MinCreateRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MinCreateRevision))
+ }
+ if m.MaxCreateRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MaxCreateRevision))
+ }
+ return n
+}
+
+func (m *RangeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Kvs) > 0 {
+ for _, e := range m.Kvs {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.More {
+ n += 2
+ }
+ if m.Count != 0 {
+ n += 1 + sovRpc(uint64(m.Count))
+ }
+ return n
+}
+
+func (m *PutRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovRpc(uint64(m.Lease))
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ if m.IgnoreValue {
+ n += 2
+ }
+ if m.IgnoreLease {
+ n += 2
+ }
+ return n
+}
+
+func (m *PutResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.PrevKv != nil {
+ l = m.PrevKv.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *DeleteRangeRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ return n
+}
+
+func (m *DeleteRangeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Deleted != 0 {
+ n += 1 + sovRpc(uint64(m.Deleted))
+ }
+ if len(m.PrevKvs) > 0 {
+ for _, e := range m.PrevKvs {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *RequestOp) Size() (n int) {
+ var l int
+ _ = l
+ if m.Request != nil {
+ n += m.Request.Size()
+ }
+ return n
+}
+
+func (m *RequestOp_RequestRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestRange != nil {
+ l = m.RequestRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestPut) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestPut != nil {
+ l = m.RequestPut.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestDeleteRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestDeleteRange != nil {
+ l = m.RequestDeleteRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestTxn) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestTxn != nil {
+ l = m.RequestTxn.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp) Size() (n int) {
+ var l int
+ _ = l
+ if m.Response != nil {
+ n += m.Response.Size()
+ }
+ return n
+}
+
+func (m *ResponseOp_ResponseRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponseRange != nil {
+ l = m.ResponseRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponsePut) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponsePut != nil {
+ l = m.ResponsePut.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponseDeleteRange != nil {
+ l = m.ResponseDeleteRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponseTxn) Size() (n int) {
+ var l int
+ _ = l
+ if m.ResponseTxn != nil {
+ l = m.ResponseTxn.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *Compare) Size() (n int) {
+ var l int
+ _ = l
+ if m.Result != 0 {
+ n += 1 + sovRpc(uint64(m.Result))
+ }
+ if m.Target != 0 {
+ n += 1 + sovRpc(uint64(m.Target))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.TargetUnion != nil {
+ n += m.TargetUnion.Size()
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 2 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *Compare_Version) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.Version))
+ return n
+}
+func (m *Compare_CreateRevision) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.CreateRevision))
+ return n
+}
+func (m *Compare_ModRevision) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.ModRevision))
+ return n
+}
+func (m *Compare_Value) Size() (n int) {
+ var l int
+ _ = l
+ if m.Value != nil {
+ l = len(m.Value)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *Compare_Lease) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.Lease))
+ return n
+}
+func (m *TxnRequest) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Compare) > 0 {
+ for _, e := range m.Compare {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.Success) > 0 {
+ for _, e := range m.Success {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.Failure) > 0 {
+ for _, e := range m.Failure {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TxnResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Succeeded {
+ n += 2
+ }
+ if len(m.Responses) > 0 {
+ for _, e := range m.Responses {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *CompactionRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.Physical {
+ n += 2
+ }
+ return n
+}
+
+func (m *CompactionResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *HashRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *HashKVRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ return n
+}
+
+func (m *HashKVResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Hash != 0 {
+ n += 1 + sovRpc(uint64(m.Hash))
+ }
+ if m.CompactRevision != 0 {
+ n += 1 + sovRpc(uint64(m.CompactRevision))
+ }
+ return n
+}
+
+func (m *HashResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Hash != 0 {
+ n += 1 + sovRpc(uint64(m.Hash))
+ }
+ return n
+}
+
+func (m *SnapshotRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *SnapshotResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.RemainingBytes != 0 {
+ n += 1 + sovRpc(uint64(m.RemainingBytes))
+ }
+ l = len(m.Blob)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *WatchRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.RequestUnion != nil {
+ n += m.RequestUnion.Size()
+ }
+ return n
+}
+
+func (m *WatchRequest_CreateRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.CreateRequest != nil {
+ l = m.CreateRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchRequest_CancelRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.CancelRequest != nil {
+ l = m.CancelRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchRequest_ProgressRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ProgressRequest != nil {
+ l = m.ProgressRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchCreateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.StartRevision != 0 {
+ n += 1 + sovRpc(uint64(m.StartRevision))
+ }
+ if m.ProgressNotify {
+ n += 2
+ }
+ if len(m.Filters) > 0 {
+ l = 0
+ for _, e := range m.Filters {
+ l += sovRpc(uint64(e))
+ }
+ n += 1 + sovRpc(uint64(l)) + l
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ if m.Fragment {
+ n += 2
+ }
+ return n
+}
+
+func (m *WatchCancelRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ return n
+}
+
+func (m *WatchProgressRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *WatchResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ if m.Created {
+ n += 2
+ }
+ if m.Canceled {
+ n += 2
+ }
+ if m.CompactRevision != 0 {
+ n += 1 + sovRpc(uint64(m.CompactRevision))
+ }
+ l = len(m.CancelReason)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Fragment {
+ n += 2
+ }
+ if len(m.Events) > 0 {
+ for _, e := range m.Events {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaseGrantRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseGrantResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ l = len(m.Error)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaseRevokeRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseRevokeResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaseCheckpoint) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.Remaining_TTL != 0 {
+ n += 1 + sovRpc(uint64(m.Remaining_TTL))
+ }
+ return n
+}
+
+func (m *LeaseCheckpointRequest) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Checkpoints) > 0 {
+ for _, e := range m.Checkpoints {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaseCheckpointResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaseKeepAliveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseKeepAliveResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ return n
+}
+
+func (m *LeaseTimeToLiveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.Keys {
+ n += 2
+ }
+ return n
+}
+
+func (m *LeaseTimeToLiveResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.GrantedTTL != 0 {
+ n += 1 + sovRpc(uint64(m.GrantedTTL))
+ }
+ if len(m.Keys) > 0 {
+ for _, b := range m.Keys {
+ l = len(b)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LeaseLeasesRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *LeaseStatus) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *LeaseLeasesResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Leases) > 0 {
+ for _, e := range m.Leases {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Member) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.ClientURLs) > 0 {
+ for _, s := range m.ClientURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberAddRequest) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberAddResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Member != nil {
+ l = m.Member.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberRemoveRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ return n
+}
+
+func (m *MemberRemoveResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberUpdateRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberUpdateResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *MemberListRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *MemberListResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DefragmentRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *DefragmentResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *MoveLeaderRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.TargetID != 0 {
+ n += 1 + sovRpc(uint64(m.TargetID))
+ }
+ return n
+}
+
+func (m *MoveLeaderResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AlarmRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Action != 0 {
+ n += 1 + sovRpc(uint64(m.Action))
+ }
+ if m.MemberID != 0 {
+ n += 1 + sovRpc(uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ n += 1 + sovRpc(uint64(m.Alarm))
+ }
+ return n
+}
+
+func (m *AlarmMember) Size() (n int) {
+ var l int
+ _ = l
+ if m.MemberID != 0 {
+ n += 1 + sovRpc(uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ n += 1 + sovRpc(uint64(m.Alarm))
+ }
+ return n
+}
+
+func (m *AlarmResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Alarms) > 0 {
+ for _, e := range m.Alarms {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.DbSize != 0 {
+ n += 1 + sovRpc(uint64(m.DbSize))
+ }
+ if m.Leader != 0 {
+ n += 1 + sovRpc(uint64(m.Leader))
+ }
+ if m.RaftIndex != 0 {
+ n += 1 + sovRpc(uint64(m.RaftIndex))
+ }
+ if m.RaftTerm != 0 {
+ n += 1 + sovRpc(uint64(m.RaftTerm))
+ }
+ if m.RaftAppliedIndex != 0 {
+ n += 1 + sovRpc(uint64(m.RaftAppliedIndex))
+ }
+ if len(m.Errors) > 0 {
+ for _, s := range m.Errors {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.DbSizeInUse != 0 {
+ n += 1 + sovRpc(uint64(m.DbSizeInUse))
+ }
+ return n
+}
+
+func (m *AuthEnableRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthDisableRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthenticateRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserAddRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGetRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserDeleteRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserChangePasswordRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGrantRoleRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.User)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserRevokeRoleRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleAddRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGetRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserListRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthRoleListRequest) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
+func (m *AuthRoleDeleteRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Perm != nil {
+ l = m.Perm.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthEnableResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthDisableResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthenticateResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Token)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserAddResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGetResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthUserDeleteResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserChangePasswordResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserGrantRoleResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthUserRevokeRoleResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleAddResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGetResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Perm) > 0 {
+ for _, e := range m.Perm {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthRoleListResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthUserListResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Users) > 0 {
+ for _, s := range m.Users {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *AuthRoleDeleteResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+
+func sovRpc(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRpc(x uint64) (n int) {
+ return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ m.ClusterId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ClusterId |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
+ }
+ m.MemberId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberId |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+ }
+ m.RaftTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftTerm |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RangeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
+ }
+ m.SortOrder = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.SortOrder |= (RangeRequest_SortOrder(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
+ }
+ m.SortTarget = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.SortTarget |= (RangeRequest_SortTarget(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Serializable = bool(v != 0)
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.KeysOnly = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CountOnly = bool(v != 0)
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
+ }
+ m.MinModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinModRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
+ }
+ m.MaxModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxModRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
+ }
+ m.MinCreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinCreateRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
+ }
+ m.MaxCreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxCreateRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RangeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
+ if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.More = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PutRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IgnoreValue = bool(v != 0)
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IgnoreLease = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PutResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrevKv == nil {
+ m.PrevKv = &mvccpb.KeyValue{}
+ }
+ if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+ }
+ m.Deleted = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Deleted |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
+ if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RequestOp) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RangeRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestRange{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &PutRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestPut{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DeleteRangeRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestDeleteRange{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TxnRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestTxn{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResponseOp) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RangeResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseRange{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &PutResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponsePut{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DeleteRangeResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseDeleteRange{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TxnResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseTxn{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Compare) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Compare: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+ }
+ m.Result = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Result |= (Compare_CompareResult(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ m.Target = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Target |= (Compare_CompareTarget(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_Version{v}
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_CreateRevision{v}
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_ModRevision{v}
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := make([]byte, postIndex-iNdEx)
+ copy(v, dAtA[iNdEx:postIndex])
+ m.TargetUnion = &Compare_Value{v}
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_Lease{v}
+ case 64:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TxnRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Compare = append(m.Compare, &Compare{})
+ if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Success = append(m.Success, &RequestOp{})
+ if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Failure = append(m.Failure, &RequestOp{})
+ if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TxnResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Succeeded = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Responses = append(m.Responses, &ResponseOp{})
+ if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Physical = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+ }
+ m.CompactRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CompactRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
+ }
+ m.RemainingBytes = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RemainingBytes |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
+ if m.Blob == nil {
+ m.Blob = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchCreateRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_CreateRequest{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchCancelRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_CancelRequest{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchProgressRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_ProgressRequest{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
+ }
+ m.StartRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StartRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ProgressNotify = bool(v != 0)
+ case 5:
+ if wireType == 0 {
+ var v WatchCreateRequest_FilterType
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Filters = append(m.Filters, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ for iNdEx < postIndex {
+ var v WatchCreateRequest_FilterType
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Filters = append(m.Filters, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Fragment = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Created = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Canceled = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+ }
+ m.CompactRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CompactRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CancelReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Fragment = bool(v != 0)
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Events = append(m.Events, &mvccpb.Event{})
+ if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Error = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType)
+ }
+ m.Remaining_TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Remaining_TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{})
+ if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Keys = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
+ }
+ m.GrantedTTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.GrantedTTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
+ copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Leases = append(m.Leases, &LeaseStatus{})
+ if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Member: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Member == nil {
+ m.Member = &Member{}
+ }
+ if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
+ }
+ m.TargetID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TargetID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ m.Action = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Action |= (AlarmRequest_AlarmAction(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+ }
+ m.MemberID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ m.Alarm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Alarm |= (AlarmType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmMember) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+ }
+ m.MemberID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ m.Alarm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Alarm |= (AlarmType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Alarms = append(m.Alarms, &AlarmMember{})
+ if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
+ }
+ m.DbSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSize |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ m.Leader = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Leader |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
+ }
+ m.RaftIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftIndex |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+ }
+ m.RaftTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftTerm |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType)
+ }
+ m.RaftAppliedIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftAppliedIndex |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType)
+ }
+ m.DbSizeInUse = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSizeInUse |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Perm == nil {
+ m.Perm = &authpb.Permission{}
+ }
+ if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Token = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Perm = append(m.Perm, &authpb.Permission{})
+ if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRpc
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRpc(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) }
+
+var fileDescriptorRpc = []byte{
+ // 3836 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0xdd, 0x6f, 0x23, 0xc9,
+ 0x71, 0xd7, 0x90, 0xe2, 0x57, 0xf1, 0x43, 0x54, 0xeb, 0x63, 0x29, 0xee, 0xae, 0x56, 0xd7, 0xbb,
+ 0x7b, 0xab, 0xdb, 0xbd, 0x13, 0x6d, 0xd9, 0x4e, 0x80, 0x4d, 0xe2, 0x58, 0x2b, 0xf1, 0x56, 0x3a,
+ 0x69, 0x45, 0xdd, 0x88, 0xda, 0xfb, 0x80, 0x11, 0x61, 0x44, 0xf6, 0x4a, 0x13, 0x91, 0x33, 0xf4,
+ 0xcc, 0x90, 0x2b, 0x5d, 0x82, 0x38, 0x30, 0x9c, 0x00, 0xc9, 0xa3, 0x0d, 0x04, 0xc9, 0x43, 0x9e,
+ 0x82, 0x20, 0xf0, 0x43, 0x80, 0xbc, 0x05, 0xc8, 0x5f, 0x90, 0xb7, 0x24, 0xc8, 0x3f, 0x10, 0x5c,
+ 0xfc, 0x92, 0xff, 0x22, 0xe8, 0xaf, 0x99, 0x9e, 0x2f, 0x69, 0x6d, 0xfa, 0xfc, 0x22, 0x4d, 0x57,
+ 0x57, 0x57, 0x55, 0x57, 0x77, 0x57, 0x55, 0xff, 0x66, 0x08, 0x25, 0x67, 0xd4, 0xdb, 0x18, 0x39,
+ 0xb6, 0x67, 0xa3, 0x0a, 0xf1, 0x7a, 0x7d, 0x97, 0x38, 0x13, 0xe2, 0x8c, 0xce, 0x9a, 0x8b, 0xe7,
+ 0xf6, 0xb9, 0xcd, 0x3a, 0x5a, 0xf4, 0x89, 0xf3, 0x34, 0x57, 0x28, 0x4f, 0x6b, 0x38, 0xe9, 0xf5,
+ 0xd8, 0x9f, 0xd1, 0x59, 0xeb, 0x72, 0x22, 0xba, 0xee, 0xb2, 0x2e, 0x63, 0xec, 0x5d, 0xb0, 0x3f,
+ 0xa3, 0x33, 0xf6, 0x4f, 0x74, 0xde, 0x3b, 0xb7, 0xed, 0xf3, 0x01, 0x69, 0x19, 0x23, 0xb3, 0x65,
+ 0x58, 0x96, 0xed, 0x19, 0x9e, 0x69, 0x5b, 0x2e, 0xef, 0xc5, 0x7f, 0xa1, 0x41, 0x4d, 0x27, 0xee,
+ 0xc8, 0xb6, 0x5c, 0xb2, 0x4b, 0x8c, 0x3e, 0x71, 0xd0, 0x7d, 0x80, 0xde, 0x60, 0xec, 0x7a, 0xc4,
+ 0x39, 0x35, 0xfb, 0x0d, 0x6d, 0x4d, 0x5b, 0x9f, 0xd5, 0x4b, 0x82, 0xb2, 0xd7, 0x47, 0x77, 0xa1,
+ 0x34, 0x24, 0xc3, 0x33, 0xde, 0x9b, 0x61, 0xbd, 0x45, 0x4e, 0xd8, 0xeb, 0xa3, 0x26, 0x14, 0x1d,
+ 0x32, 0x31, 0x5d, 0xd3, 0xb6, 0x1a, 0xd9, 0x35, 0x6d, 0x3d, 0xab, 0xfb, 0x6d, 0x3a, 0xd0, 0x31,
+ 0xde, 0x78, 0xa7, 0x1e, 0x71, 0x86, 0x8d, 0x59, 0x3e, 0x90, 0x12, 0xba, 0xc4, 0x19, 0xe2, 0x9f,
+ 0xe6, 0xa0, 0xa2, 0x1b, 0xd6, 0x39, 0xd1, 0xc9, 0x8f, 0xc6, 0xc4, 0xf5, 0x50, 0x1d, 0xb2, 0x97,
+ 0xe4, 0x9a, 0xa9, 0xaf, 0xe8, 0xf4, 0x91, 0x8f, 0xb7, 0xce, 0xc9, 0x29, 0xb1, 0xb8, 0xe2, 0x0a,
+ 0x1d, 0x6f, 0x9d, 0x93, 0xb6, 0xd5, 0x47, 0x8b, 0x90, 0x1b, 0x98, 0x43, 0xd3, 0x13, 0x5a, 0x79,
+ 0x23, 0x64, 0xce, 0x6c, 0xc4, 0x9c, 0x6d, 0x00, 0xd7, 0x76, 0xbc, 0x53, 0xdb, 0xe9, 0x13, 0xa7,
+ 0x91, 0x5b, 0xd3, 0xd6, 0x6b, 0x9b, 0x8f, 0x36, 0xd4, 0x85, 0xd8, 0x50, 0x0d, 0xda, 0x38, 0xb6,
+ 0x1d, 0xaf, 0x43, 0x79, 0xf5, 0x92, 0x2b, 0x1f, 0xd1, 0xc7, 0x50, 0x66, 0x42, 0x3c, 0xc3, 0x39,
+ 0x27, 0x5e, 0x23, 0xcf, 0xa4, 0x3c, 0xbe, 0x45, 0x4a, 0x97, 0x31, 0xeb, 0x4c, 0x3d, 0x7f, 0x46,
+ 0x18, 0x2a, 0x2e, 0x71, 0x4c, 0x63, 0x60, 0x7e, 0x65, 0x9c, 0x0d, 0x48, 0xa3, 0xb0, 0xa6, 0xad,
+ 0x17, 0xf5, 0x10, 0x8d, 0xce, 0xff, 0x92, 0x5c, 0xbb, 0xa7, 0xb6, 0x35, 0xb8, 0x6e, 0x14, 0x19,
+ 0x43, 0x91, 0x12, 0x3a, 0xd6, 0xe0, 0x9a, 0x2d, 0x9a, 0x3d, 0xb6, 0x3c, 0xde, 0x5b, 0x62, 0xbd,
+ 0x25, 0x46, 0x61, 0xdd, 0xeb, 0x50, 0x1f, 0x9a, 0xd6, 0xe9, 0xd0, 0xee, 0x9f, 0xfa, 0x0e, 0x01,
+ 0xe6, 0x90, 0xda, 0xd0, 0xb4, 0x5e, 0xd9, 0x7d, 0x5d, 0xba, 0x85, 0x72, 0x1a, 0x57, 0x61, 0xce,
+ 0xb2, 0xe0, 0x34, 0xae, 0x54, 0xce, 0x0d, 0x58, 0xa0, 0x32, 0x7b, 0x0e, 0x31, 0x3c, 0x12, 0x30,
+ 0x57, 0x18, 0xf3, 0xfc, 0xd0, 0xb4, 0xb6, 0x59, 0x4f, 0x88, 0xdf, 0xb8, 0x8a, 0xf1, 0x57, 0x05,
+ 0xbf, 0x71, 0x15, 0xe6, 0xc7, 0x1b, 0x50, 0xf2, 0x7d, 0x8e, 0x8a, 0x30, 0x7b, 0xd8, 0x39, 0x6c,
+ 0xd7, 0x67, 0x10, 0x40, 0x7e, 0xeb, 0x78, 0xbb, 0x7d, 0xb8, 0x53, 0xd7, 0x50, 0x19, 0x0a, 0x3b,
+ 0x6d, 0xde, 0xc8, 0xe0, 0x17, 0x00, 0x81, 0x77, 0x51, 0x01, 0xb2, 0xfb, 0xed, 0x2f, 0xea, 0x33,
+ 0x94, 0xe7, 0x75, 0x5b, 0x3f, 0xde, 0xeb, 0x1c, 0xd6, 0x35, 0x3a, 0x78, 0x5b, 0x6f, 0x6f, 0x75,
+ 0xdb, 0xf5, 0x0c, 0xe5, 0x78, 0xd5, 0xd9, 0xa9, 0x67, 0x51, 0x09, 0x72, 0xaf, 0xb7, 0x0e, 0x4e,
+ 0xda, 0xf5, 0x59, 0xfc, 0x73, 0x0d, 0xaa, 0x62, 0xbd, 0xf8, 0x99, 0x40, 0xdf, 0x85, 0xfc, 0x05,
+ 0x3b, 0x17, 0x6c, 0x2b, 0x96, 0x37, 0xef, 0x45, 0x16, 0x37, 0x74, 0x76, 0x74, 0xc1, 0x8b, 0x30,
+ 0x64, 0x2f, 0x27, 0x6e, 0x23, 0xb3, 0x96, 0x5d, 0x2f, 0x6f, 0xd6, 0x37, 0xf8, 0x81, 0xdd, 0xd8,
+ 0x27, 0xd7, 0xaf, 0x8d, 0xc1, 0x98, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0x1d, 0xda, 0x0e, 0x61, 0x3b,
+ 0xb6, 0xa8, 0xb3, 0x67, 0xba, 0x8d, 0xd9, 0xa2, 0x89, 0xdd, 0xca, 0x1b, 0xf8, 0x17, 0x1a, 0xc0,
+ 0xd1, 0xd8, 0x4b, 0x3f, 0x1a, 0x8b, 0x90, 0x9b, 0x50, 0xc1, 0xe2, 0x58, 0xf0, 0x06, 0x3b, 0x13,
+ 0xc4, 0x70, 0x89, 0x7f, 0x26, 0x68, 0x03, 0xdd, 0x81, 0xc2, 0xc8, 0x21, 0x93, 0xd3, 0xcb, 0x09,
+ 0x53, 0x52, 0xd4, 0xf3, 0xb4, 0xb9, 0x3f, 0x41, 0xef, 0x41, 0xc5, 0x3c, 0xb7, 0x6c, 0x87, 0x9c,
+ 0x72, 0x59, 0x39, 0xd6, 0x5b, 0xe6, 0x34, 0x66, 0xb7, 0xc2, 0xc2, 0x05, 0xe7, 0x55, 0x96, 0x03,
+ 0x4a, 0xc2, 0x16, 0x94, 0x99, 0xa9, 0x53, 0xb9, 0xef, 0x83, 0xc0, 0xc6, 0x0c, 0x1b, 0x16, 0x77,
+ 0xa1, 0xb0, 0x1a, 0xff, 0x10, 0xd0, 0x0e, 0x19, 0x10, 0x8f, 0x4c, 0x13, 0x3d, 0x14, 0x9f, 0x64,
+ 0x55, 0x9f, 0xe0, 0x9f, 0x69, 0xb0, 0x10, 0x12, 0x3f, 0xd5, 0xb4, 0x1a, 0x50, 0xe8, 0x33, 0x61,
+ 0xdc, 0x82, 0xac, 0x2e, 0x9b, 0xe8, 0x19, 0x14, 0x85, 0x01, 0x6e, 0x23, 0x9b, 0xb2, 0x69, 0x0a,
+ 0xdc, 0x26, 0x17, 0xff, 0x22, 0x03, 0x25, 0x31, 0xd1, 0xce, 0x08, 0x6d, 0x41, 0xd5, 0xe1, 0x8d,
+ 0x53, 0x36, 0x1f, 0x61, 0x51, 0x33, 0x3d, 0x08, 0xed, 0xce, 0xe8, 0x15, 0x31, 0x84, 0x91, 0xd1,
+ 0xef, 0x41, 0x59, 0x8a, 0x18, 0x8d, 0x3d, 0xe1, 0xf2, 0x46, 0x58, 0x40, 0xb0, 0xff, 0x76, 0x67,
+ 0x74, 0x10, 0xec, 0x47, 0x63, 0x0f, 0x75, 0x61, 0x51, 0x0e, 0xe6, 0xb3, 0x11, 0x66, 0x64, 0x99,
+ 0x94, 0xb5, 0xb0, 0x94, 0xf8, 0x52, 0xed, 0xce, 0xe8, 0x48, 0x8c, 0x57, 0x3a, 0x55, 0x93, 0xbc,
+ 0x2b, 0x1e, 0xbc, 0x63, 0x26, 0x75, 0xaf, 0xac, 0xb8, 0x49, 0xdd, 0x2b, 0xeb, 0x45, 0x09, 0x0a,
+ 0xa2, 0x85, 0xff, 0x35, 0x03, 0x20, 0x57, 0xa3, 0x33, 0x42, 0x3b, 0x50, 0x73, 0x44, 0x2b, 0xe4,
+ 0xad, 0xbb, 0x89, 0xde, 0x12, 0x8b, 0x38, 0xa3, 0x57, 0xe5, 0x20, 0x6e, 0xdc, 0xf7, 0xa1, 0xe2,
+ 0x4b, 0x09, 0x1c, 0xb6, 0x92, 0xe0, 0x30, 0x5f, 0x42, 0x59, 0x0e, 0xa0, 0x2e, 0xfb, 0x0c, 0x96,
+ 0xfc, 0xf1, 0x09, 0x3e, 0x7b, 0xef, 0x06, 0x9f, 0xf9, 0x02, 0x17, 0xa4, 0x04, 0xd5, 0x6b, 0xaa,
+ 0x61, 0x81, 0xdb, 0x56, 0x12, 0xdc, 0x16, 0x37, 0x8c, 0x3a, 0x0e, 0x68, 0xbe, 0xe4, 0x4d, 0xfc,
+ 0x7f, 0x59, 0x28, 0x6c, 0xdb, 0xc3, 0x91, 0xe1, 0xd0, 0xd5, 0xc8, 0x3b, 0xc4, 0x1d, 0x0f, 0x3c,
+ 0xe6, 0xae, 0xda, 0xe6, 0xc3, 0xb0, 0x44, 0xc1, 0x26, 0xff, 0xeb, 0x8c, 0x55, 0x17, 0x43, 0xe8,
+ 0x60, 0x91, 0x1e, 0x33, 0xef, 0x30, 0x58, 0x24, 0x47, 0x31, 0x44, 0x1e, 0xe4, 0x6c, 0x70, 0x90,
+ 0x9b, 0x50, 0x98, 0x10, 0x27, 0x48, 0xe9, 0xbb, 0x33, 0xba, 0x24, 0xa0, 0x0f, 0x60, 0x2e, 0x9a,
+ 0x5e, 0x72, 0x82, 0xa7, 0xd6, 0x0b, 0x67, 0xa3, 0x87, 0x50, 0x09, 0xe5, 0xb8, 0xbc, 0xe0, 0x2b,
+ 0x0f, 0x95, 0x14, 0xb7, 0x2c, 0xe3, 0x2a, 0xcd, 0xc7, 0x95, 0xdd, 0x19, 0x19, 0x59, 0x97, 0x65,
+ 0x64, 0x2d, 0x8a, 0x51, 0x22, 0xb6, 0x86, 0x82, 0xcc, 0x0f, 0xc2, 0x41, 0x06, 0xff, 0x00, 0xaa,
+ 0x21, 0x07, 0xd1, 0xbc, 0xd3, 0xfe, 0xf4, 0x64, 0xeb, 0x80, 0x27, 0xa9, 0x97, 0x2c, 0x2f, 0xe9,
+ 0x75, 0x8d, 0xe6, 0xba, 0x83, 0xf6, 0xf1, 0x71, 0x3d, 0x83, 0xaa, 0x50, 0x3a, 0xec, 0x74, 0x4f,
+ 0x39, 0x57, 0x16, 0xbf, 0xf4, 0x25, 0x88, 0x24, 0xa7, 0xe4, 0xb6, 0x19, 0x25, 0xb7, 0x69, 0x32,
+ 0xb7, 0x65, 0x82, 0xdc, 0xc6, 0xd2, 0xdc, 0x41, 0x7b, 0xeb, 0xb8, 0x5d, 0x9f, 0x7d, 0x51, 0x83,
+ 0x0a, 0xf7, 0xef, 0xe9, 0xd8, 0xa2, 0xa9, 0xf6, 0x1f, 0x34, 0x80, 0xe0, 0x34, 0xa1, 0x16, 0x14,
+ 0x7a, 0x5c, 0x4f, 0x43, 0x63, 0xc1, 0x68, 0x29, 0x71, 0xc9, 0x74, 0xc9, 0x85, 0xbe, 0x0d, 0x05,
+ 0x77, 0xdc, 0xeb, 0x11, 0x57, 0xa6, 0xbc, 0x3b, 0xd1, 0x78, 0x28, 0xa2, 0x95, 0x2e, 0xf9, 0xe8,
+ 0x90, 0x37, 0x86, 0x39, 0x18, 0xb3, 0x04, 0x78, 0xf3, 0x10, 0xc1, 0x87, 0xff, 0x4e, 0x83, 0xb2,
+ 0xb2, 0x79, 0x7f, 0xcd, 0x20, 0x7c, 0x0f, 0x4a, 0xcc, 0x06, 0xd2, 0x17, 0x61, 0xb8, 0xa8, 0x07,
+ 0x04, 0xf4, 0x3b, 0x50, 0x92, 0x27, 0x40, 0x46, 0xe2, 0x46, 0xb2, 0xd8, 0xce, 0x48, 0x0f, 0x58,
+ 0xf1, 0x3e, 0xcc, 0x33, 0xaf, 0xf4, 0x68, 0x71, 0x2d, 0xfd, 0xa8, 0x96, 0x9f, 0x5a, 0xa4, 0xfc,
+ 0x6c, 0x42, 0x71, 0x74, 0x71, 0xed, 0x9a, 0x3d, 0x63, 0x20, 0xac, 0xf0, 0xdb, 0xf8, 0x13, 0x40,
+ 0xaa, 0xb0, 0x69, 0xa6, 0x8b, 0xab, 0x50, 0xde, 0x35, 0xdc, 0x0b, 0x61, 0x12, 0x7e, 0x06, 0x55,
+ 0xda, 0xdc, 0x7f, 0xfd, 0x0e, 0x36, 0xb2, 0xcb, 0x81, 0xe4, 0x9e, 0xca, 0xe7, 0x08, 0x66, 0x2f,
+ 0x0c, 0xf7, 0x82, 0x4d, 0xb4, 0xaa, 0xb3, 0x67, 0xf4, 0x01, 0xd4, 0x7b, 0x7c, 0x92, 0xa7, 0x91,
+ 0x2b, 0xc3, 0x9c, 0xa0, 0xfb, 0x95, 0xe0, 0xe7, 0x50, 0xe1, 0x73, 0xf8, 0x4d, 0x1b, 0x81, 0xe7,
+ 0x61, 0xee, 0xd8, 0x32, 0x46, 0xee, 0x85, 0x2d, 0xb3, 0x1b, 0x9d, 0x74, 0x3d, 0xa0, 0x4d, 0xa5,
+ 0xf1, 0x09, 0xcc, 0x39, 0x64, 0x68, 0x98, 0x96, 0x69, 0x9d, 0x9f, 0x9e, 0x5d, 0x7b, 0xc4, 0x15,
+ 0x17, 0xa6, 0x9a, 0x4f, 0x7e, 0x41, 0xa9, 0xd4, 0xb4, 0xb3, 0x81, 0x7d, 0x26, 0xc2, 0x1c, 0x7b,
+ 0xc6, 0x7f, 0x99, 0x81, 0xca, 0x67, 0x86, 0xd7, 0x93, 0x4b, 0x87, 0xf6, 0xa0, 0xe6, 0x07, 0x37,
+ 0x46, 0x11, 0xb6, 0x44, 0x52, 0x2c, 0x1b, 0x23, 0x4b, 0x69, 0x99, 0x1d, 0xab, 0x3d, 0x95, 0xc0,
+ 0x44, 0x19, 0x56, 0x8f, 0x0c, 0x7c, 0x51, 0x99, 0x74, 0x51, 0x8c, 0x51, 0x15, 0xa5, 0x12, 0x50,
+ 0x07, 0xea, 0x23, 0xc7, 0x3e, 0x77, 0x88, 0xeb, 0xfa, 0xc2, 0x78, 0x1a, 0xc3, 0x09, 0xc2, 0x8e,
+ 0x04, 0x6b, 0x20, 0x6e, 0x6e, 0x14, 0x26, 0xbd, 0x98, 0x0b, 0xea, 0x19, 0x1e, 0x9c, 0xfe, 0x2b,
+ 0x03, 0x28, 0x3e, 0xa9, 0x5f, 0xb5, 0xc4, 0x7b, 0x0c, 0x35, 0xd7, 0x33, 0x9c, 0xd8, 0x66, 0xab,
+ 0x32, 0xaa, 0x1f, 0xf1, 0x9f, 0x80, 0x6f, 0xd0, 0xa9, 0x65, 0x7b, 0xe6, 0x9b, 0x6b, 0x51, 0x25,
+ 0xd7, 0x24, 0xf9, 0x90, 0x51, 0x51, 0x1b, 0x0a, 0x6f, 0xcc, 0x81, 0x47, 0x1c, 0xb7, 0x91, 0x5b,
+ 0xcb, 0xae, 0xd7, 0x36, 0x9f, 0xdd, 0xb6, 0x0c, 0x1b, 0x1f, 0x33, 0xfe, 0xee, 0xf5, 0x88, 0xe8,
+ 0x72, 0xac, 0x5a, 0x79, 0xe6, 0x43, 0xd5, 0xf8, 0x0a, 0x14, 0xdf, 0x52, 0x11, 0xf4, 0x96, 0x5d,
+ 0xe0, 0xc5, 0x22, 0x6b, 0xf3, 0x4b, 0xf6, 0x1b, 0xc7, 0x38, 0x1f, 0x12, 0xcb, 0x93, 0xf7, 0x40,
+ 0xd9, 0xc6, 0x8f, 0x01, 0x02, 0x35, 0x34, 0xe4, 0x1f, 0x76, 0x8e, 0x4e, 0xba, 0xf5, 0x19, 0x54,
+ 0x81, 0xe2, 0x61, 0x67, 0xa7, 0x7d, 0xd0, 0xa6, 0xf9, 0x01, 0xb7, 0xa4, 0x4b, 0x43, 0x6b, 0xa9,
+ 0xea, 0xd4, 0x42, 0x3a, 0xf1, 0x32, 0x2c, 0x26, 0x2d, 0x20, 0xad, 0x45, 0xab, 0x62, 0x97, 0x4e,
+ 0x75, 0x54, 0x54, 0xd5, 0x99, 0xf0, 0x74, 0x1b, 0x50, 0xe0, 0xbb, 0xb7, 0x2f, 0x8a, 0x73, 0xd9,
+ 0xa4, 0x8e, 0xe0, 0x9b, 0x91, 0xf4, 0xc5, 0x2a, 0xf9, 0xed, 0xc4, 0xf0, 0x92, 0x4b, 0x0c, 0x2f,
+ 0xe8, 0x21, 0x54, 0xfd, 0xd3, 0x60, 0xb8, 0xa2, 0x16, 0x28, 0xe9, 0x15, 0xb9, 0xd1, 0x29, 0x2d,
+ 0xe4, 0xf4, 0x42, 0xd8, 0xe9, 0xe8, 0x31, 0xe4, 0xc9, 0x84, 0x58, 0x9e, 0xdb, 0x28, 0xb3, 0x8c,
+ 0x51, 0x95, 0xb5, 0x7b, 0x9b, 0x52, 0x75, 0xd1, 0x89, 0xbf, 0x07, 0xf3, 0xec, 0x8e, 0xf4, 0xd2,
+ 0x31, 0x2c, 0xf5, 0x32, 0xd7, 0xed, 0x1e, 0x08, 0x77, 0xd3, 0x47, 0x54, 0x83, 0xcc, 0xde, 0x8e,
+ 0x70, 0x42, 0x66, 0x6f, 0x07, 0xff, 0x44, 0x03, 0xa4, 0x8e, 0x9b, 0xca, 0xcf, 0x11, 0xe1, 0x52,
+ 0x7d, 0x36, 0x50, 0xbf, 0x08, 0x39, 0xe2, 0x38, 0xb6, 0xc3, 0x3c, 0x5a, 0xd2, 0x79, 0x03, 0x3f,
+ 0x12, 0x36, 0xe8, 0x64, 0x62, 0x5f, 0xfa, 0x67, 0x90, 0x4b, 0xd3, 0x7c, 0x53, 0xf7, 0x61, 0x21,
+ 0xc4, 0x35, 0x55, 0xe6, 0xfa, 0x18, 0xe6, 0x98, 0xb0, 0xed, 0x0b, 0xd2, 0xbb, 0x1c, 0xd9, 0xa6,
+ 0x15, 0xd3, 0x47, 0x57, 0x2e, 0x08, 0xb0, 0x74, 0x1e, 0x7c, 0x62, 0x15, 0x9f, 0xd8, 0xed, 0x1e,
+ 0xe0, 0x2f, 0x60, 0x39, 0x22, 0x47, 0x9a, 0xff, 0x87, 0x50, 0xee, 0xf9, 0x44, 0x57, 0xd4, 0x3a,
+ 0xf7, 0xc3, 0xc6, 0x45, 0x87, 0xaa, 0x23, 0x70, 0x07, 0xee, 0xc4, 0x44, 0x4f, 0x35, 0xe7, 0x27,
+ 0xb0, 0xc4, 0x04, 0xee, 0x13, 0x32, 0xda, 0x1a, 0x98, 0x93, 0x54, 0x4f, 0x8f, 0xc4, 0xa4, 0x14,
+ 0xc6, 0x6f, 0x76, 0x5f, 0xe0, 0xdf, 0x17, 0x1a, 0xbb, 0xe6, 0x90, 0x74, 0xed, 0x83, 0x74, 0xdb,
+ 0x68, 0x36, 0xbb, 0x24, 0xd7, 0xae, 0x28, 0x6b, 0xd8, 0x33, 0xfe, 0x47, 0x4d, 0xb8, 0x4a, 0x1d,
+ 0xfe, 0x0d, 0xef, 0xe4, 0x55, 0x80, 0x73, 0x7a, 0x64, 0x48, 0x9f, 0x76, 0x70, 0x44, 0x45, 0xa1,
+ 0xf8, 0x76, 0xd2, 0xf8, 0x5d, 0x11, 0x76, 0x2e, 0x8a, 0x7d, 0xce, 0xfe, 0xf8, 0x51, 0xee, 0x3e,
+ 0x94, 0x19, 0xe1, 0xd8, 0x33, 0xbc, 0xb1, 0x1b, 0x5b, 0x8c, 0x3f, 0x13, 0xdb, 0x5e, 0x0e, 0x9a,
+ 0x6a, 0x5e, 0xdf, 0x86, 0x3c, 0xbb, 0x4c, 0xc8, 0x52, 0x7a, 0x25, 0x61, 0x3f, 0x72, 0x3b, 0x74,
+ 0xc1, 0x88, 0x2f, 0x20, 0xff, 0x8a, 0x21, 0xb0, 0x8a, 0x65, 0xb3, 0x72, 0x29, 0x2c, 0x63, 0xc8,
+ 0x71, 0xa1, 0x92, 0xce, 0x9e, 0x59, 0xe5, 0x49, 0x88, 0x73, 0xa2, 0x1f, 0xf0, 0x0a, 0xb7, 0xa4,
+ 0xfb, 0x6d, 0xea, 0xb2, 0xde, 0xc0, 0x24, 0x96, 0xc7, 0x7a, 0x67, 0x59, 0xaf, 0x42, 0xc1, 0x1b,
+ 0x50, 0xe7, 0x9a, 0xb6, 0xfa, 0x7d, 0xa5, 0x82, 0xf4, 0xe5, 0x69, 0x61, 0x79, 0xf8, 0x9f, 0x34,
+ 0x98, 0x57, 0x06, 0x4c, 0xe5, 0x98, 0x0f, 0x21, 0xcf, 0x71, 0x66, 0x51, 0xac, 0x2c, 0x86, 0x47,
+ 0x71, 0x35, 0xba, 0xe0, 0x41, 0x1b, 0x50, 0xe0, 0x4f, 0xb2, 0x8c, 0x4f, 0x66, 0x97, 0x4c, 0xf8,
+ 0x31, 0x2c, 0x08, 0x12, 0x19, 0xda, 0x49, 0x7b, 0x9b, 0x39, 0x14, 0xff, 0x29, 0x2c, 0x86, 0xd9,
+ 0xa6, 0x9a, 0x92, 0x62, 0x64, 0xe6, 0x5d, 0x8c, 0xdc, 0x92, 0x46, 0x9e, 0x8c, 0xfa, 0x4a, 0x29,
+ 0x14, 0x5d, 0x75, 0x75, 0x45, 0x32, 0x91, 0x15, 0xf1, 0x27, 0x20, 0x45, 0xfc, 0x56, 0x27, 0xb0,
+ 0x20, 0xb7, 0xc3, 0x81, 0xe9, 0xfa, 0x15, 0xf7, 0x57, 0x80, 0x54, 0xe2, 0x6f, 0xdb, 0xa0, 0x1d,
+ 0x22, 0x13, 0xb9, 0x34, 0xe8, 0x13, 0x40, 0x2a, 0x71, 0xaa, 0x88, 0xde, 0x82, 0xf9, 0x57, 0xf6,
+ 0x84, 0x86, 0x06, 0x4a, 0x0d, 0x8e, 0x0c, 0xbf, 0x7f, 0xfb, 0xcb, 0xe6, 0xb7, 0xa9, 0x72, 0x75,
+ 0xc0, 0x54, 0xca, 0xff, 0x43, 0x83, 0xca, 0xd6, 0xc0, 0x70, 0x86, 0x52, 0xf1, 0xf7, 0x21, 0xcf,
+ 0x6f, 0x95, 0x02, 0xc8, 0x79, 0x3f, 0x2c, 0x46, 0xe5, 0xe5, 0x8d, 0x2d, 0x7e, 0x07, 0x15, 0xa3,
+ 0xa8, 0xe1, 0xe2, 0x5d, 0xcf, 0x4e, 0xe4, 0xdd, 0xcf, 0x0e, 0xfa, 0x08, 0x72, 0x06, 0x1d, 0xc2,
+ 0x42, 0x70, 0x2d, 0x7a, 0x9f, 0x67, 0xd2, 0x58, 0xed, 0xcb, 0xb9, 0xf0, 0x77, 0xa1, 0xac, 0x68,
+ 0x40, 0x05, 0xc8, 0xbe, 0x6c, 0x8b, 0x42, 0x75, 0x6b, 0xbb, 0xbb, 0xf7, 0x9a, 0x03, 0x19, 0x35,
+ 0x80, 0x9d, 0xb6, 0xdf, 0xce, 0xe0, 0xcf, 0xc5, 0x28, 0x11, 0xef, 0x54, 0x7b, 0xb4, 0x34, 0x7b,
+ 0x32, 0xef, 0x64, 0xcf, 0x15, 0x54, 0xc5, 0xf4, 0xa7, 0x0d, 0xdf, 0x4c, 0x5e, 0x4a, 0xf8, 0x56,
+ 0x8c, 0xd7, 0x05, 0x23, 0x9e, 0x83, 0xaa, 0x08, 0xe8, 0x62, 0xff, 0xfd, 0x4b, 0x06, 0x6a, 0x92,
+ 0x32, 0x2d, 0xe0, 0x2c, 0xb1, 0x32, 0x9e, 0x01, 0x7c, 0xa4, 0x6c, 0x19, 0xf2, 0xfd, 0xb3, 0x63,
+ 0xf3, 0x2b, 0xf9, 0x72, 0x40, 0xb4, 0x28, 0x7d, 0xc0, 0xf5, 0xf0, 0x37, 0x74, 0xa2, 0x85, 0xee,
+ 0xf1, 0x97, 0x77, 0x7b, 0x56, 0x9f, 0x5c, 0xb1, 0x3a, 0x7a, 0x56, 0x0f, 0x08, 0x0c, 0x44, 0x10,
+ 0x6f, 0xf2, 0x58, 0xf1, 0xac, 0xbc, 0xd9, 0x43, 0x4f, 0xa1, 0x4e, 0x9f, 0xb7, 0x46, 0xa3, 0x81,
+ 0x49, 0xfa, 0x5c, 0x40, 0x81, 0xf1, 0xc4, 0xe8, 0x54, 0x3b, 0x2b, 0x37, 0xdd, 0x46, 0x91, 0x85,
+ 0x2d, 0xd1, 0x42, 0x6b, 0x50, 0xe6, 0xf6, 0xed, 0x59, 0x27, 0x2e, 0x61, 0xaf, 0xb7, 0xb2, 0xba,
+ 0x4a, 0xa2, 0xe7, 0x78, 0x6b, 0xec, 0x5d, 0xb4, 0x2d, 0xe3, 0x6c, 0x20, 0xe3, 0x22, 0x4d, 0xe6,
+ 0x94, 0xb8, 0x63, 0xba, 0x2a, 0xb5, 0x0d, 0x0b, 0x94, 0x4a, 0x2c, 0xcf, 0xec, 0x29, 0x41, 0x54,
+ 0xa6, 0x4a, 0x2d, 0x92, 0x2a, 0x0d, 0xd7, 0x7d, 0x6b, 0x3b, 0x7d, 0xe1, 0x40, 0xbf, 0x8d, 0x77,
+ 0xb8, 0xf0, 0x13, 0x37, 0x94, 0x0c, 0x7f, 0x55, 0x29, 0xeb, 0x81, 0x94, 0x97, 0xc4, 0xbb, 0x41,
+ 0x0a, 0x7e, 0x06, 0x4b, 0x92, 0x53, 0x40, 0xbe, 0x37, 0x30, 0x77, 0xe0, 0xbe, 0x64, 0xde, 0xbe,
+ 0xa0, 0x57, 0xe0, 0x23, 0xa1, 0xf0, 0xd7, 0xb5, 0xf3, 0x05, 0x34, 0x7c, 0x3b, 0xd9, 0x35, 0xc4,
+ 0x1e, 0xa8, 0x06, 0x8c, 0x5d, 0xb1, 0x33, 0x4b, 0x3a, 0x7b, 0xa6, 0x34, 0xc7, 0x1e, 0xf8, 0x85,
+ 0x07, 0x7d, 0xc6, 0xdb, 0xb0, 0x22, 0x65, 0x88, 0x0b, 0x42, 0x58, 0x48, 0xcc, 0xa0, 0x24, 0x21,
+ 0xc2, 0x61, 0x74, 0xe8, 0xcd, 0x6e, 0x57, 0x39, 0xc3, 0xae, 0x65, 0x32, 0x35, 0x45, 0xe6, 0x12,
+ 0xdf, 0x11, 0xd4, 0x30, 0x35, 0x2f, 0x09, 0x32, 0x15, 0xa0, 0x92, 0xc5, 0x42, 0x50, 0x72, 0x6c,
+ 0x21, 0x62, 0xa2, 0x7f, 0x08, 0xab, 0xbe, 0x11, 0xd4, 0x6f, 0x47, 0xc4, 0x19, 0x9a, 0xae, 0xab,
+ 0x80, 0x84, 0x49, 0x13, 0x7f, 0x1f, 0x66, 0x47, 0x44, 0x44, 0xae, 0xf2, 0x26, 0xda, 0xe0, 0x6f,
+ 0xf5, 0x37, 0x94, 0xc1, 0xac, 0x1f, 0xf7, 0xe1, 0x81, 0x94, 0xce, 0x3d, 0x9a, 0x28, 0x3e, 0x6a,
+ 0x94, 0x84, 0x4e, 0x32, 0x29, 0xd0, 0x49, 0x36, 0x02, 0x5c, 0x7f, 0xc2, 0x1d, 0x29, 0xcf, 0xd6,
+ 0x54, 0x19, 0x69, 0x9f, 0xfb, 0xd4, 0x3f, 0x92, 0x53, 0x09, 0x3b, 0x83, 0xc5, 0xf0, 0x49, 0x9e,
+ 0x2a, 0x58, 0x2e, 0x42, 0xce, 0xb3, 0x2f, 0x89, 0x0c, 0x95, 0xbc, 0x21, 0x0d, 0xf6, 0x8f, 0xf9,
+ 0x54, 0x06, 0x1b, 0x81, 0x30, 0xb6, 0x25, 0xa7, 0xb5, 0x97, 0xae, 0xa6, 0x2c, 0xf1, 0x78, 0x03,
+ 0x1f, 0xc2, 0x72, 0x34, 0x4c, 0x4c, 0x65, 0xf2, 0x6b, 0xbe, 0x81, 0x93, 0x22, 0xc9, 0x54, 0x72,
+ 0x3f, 0x0d, 0x82, 0x81, 0x12, 0x50, 0xa6, 0x12, 0xa9, 0x43, 0x33, 0x29, 0xbe, 0xfc, 0x26, 0xf6,
+ 0xab, 0x1f, 0x6e, 0xa6, 0x12, 0xe6, 0x06, 0xc2, 0xa6, 0x5f, 0xfe, 0x20, 0x46, 0x64, 0x6f, 0x8c,
+ 0x11, 0xe2, 0x90, 0x04, 0x51, 0xec, 0x1b, 0xd8, 0x74, 0x42, 0x47, 0x10, 0x40, 0xa7, 0xd5, 0x41,
+ 0x73, 0x88, 0xaf, 0x83, 0x35, 0xe4, 0xc6, 0x56, 0xc3, 0xee, 0x54, 0x8b, 0xf1, 0x59, 0x10, 0x3b,
+ 0x63, 0x91, 0x79, 0x2a, 0xc1, 0x9f, 0xc3, 0x5a, 0x7a, 0x50, 0x9e, 0x46, 0xf2, 0xd3, 0x16, 0x94,
+ 0xfc, 0xb2, 0x55, 0xf9, 0x22, 0xa6, 0x0c, 0x85, 0xc3, 0xce, 0xf1, 0xd1, 0xd6, 0x76, 0x9b, 0x7f,
+ 0x12, 0xb3, 0xdd, 0xd1, 0xf5, 0x93, 0xa3, 0x6e, 0x3d, 0xb3, 0xf9, 0xcb, 0x2c, 0x64, 0xf6, 0x5f,
+ 0xa3, 0x2f, 0x20, 0xc7, 0xdf, 0x0f, 0xdf, 0xf0, 0x51, 0x40, 0xf3, 0xa6, 0x57, 0xe0, 0xf8, 0xce,
+ 0x4f, 0xfe, 0xfb, 0x97, 0x3f, 0xcf, 0xcc, 0xe3, 0x4a, 0x6b, 0xf2, 0x9d, 0xd6, 0xe5, 0xa4, 0xc5,
+ 0x72, 0xc3, 0x73, 0xed, 0x29, 0xfa, 0x14, 0xb2, 0x47, 0x63, 0x0f, 0xa5, 0x7e, 0x2c, 0xd0, 0x4c,
+ 0x7f, 0x2b, 0x8e, 0x97, 0x98, 0xd0, 0x39, 0x0c, 0x42, 0xe8, 0x68, 0xec, 0x51, 0x91, 0x3f, 0x82,
+ 0xb2, 0xfa, 0x4e, 0xfb, 0xd6, 0x2f, 0x08, 0x9a, 0xb7, 0xbf, 0x2f, 0xc7, 0xf7, 0x99, 0xaa, 0x3b,
+ 0x18, 0x09, 0x55, 0xfc, 0xad, 0xbb, 0x3a, 0x8b, 0xee, 0x95, 0x85, 0x52, 0xbf, 0x2f, 0x68, 0xa6,
+ 0xbf, 0x42, 0x8f, 0xcd, 0xc2, 0xbb, 0xb2, 0xa8, 0xc8, 0x3f, 0x16, 0x6f, 0xcf, 0x7b, 0x1e, 0x7a,
+ 0x90, 0xf0, 0xf6, 0x54, 0x7d, 0x4f, 0xd8, 0x5c, 0x4b, 0x67, 0x10, 0x4a, 0xee, 0x31, 0x25, 0xcb,
+ 0x78, 0x5e, 0x28, 0xe9, 0xf9, 0x2c, 0xcf, 0xb5, 0xa7, 0x9b, 0x3d, 0xc8, 0x31, 0x0c, 0x1e, 0x7d,
+ 0x29, 0x1f, 0x9a, 0x09, 0x2f, 0x23, 0x52, 0x16, 0x3a, 0x84, 0xde, 0xe3, 0x45, 0xa6, 0xa8, 0x86,
+ 0x4b, 0x54, 0x11, 0x43, 0xe0, 0x9f, 0x6b, 0x4f, 0xd7, 0xb5, 0x6f, 0x69, 0x9b, 0xff, 0x9c, 0x83,
+ 0x1c, 0x03, 0x9f, 0xd0, 0x25, 0x40, 0x80, 0x47, 0x47, 0x67, 0x17, 0x43, 0xb8, 0xa3, 0xb3, 0x8b,
+ 0x43, 0xd9, 0xb8, 0xc9, 0x94, 0x2e, 0xe2, 0x39, 0xaa, 0x94, 0x61, 0x5a, 0x2d, 0x06, 0xd3, 0x51,
+ 0x3f, 0xfe, 0x95, 0x26, 0xb0, 0x37, 0x7e, 0x96, 0x50, 0x92, 0xb4, 0x10, 0x28, 0x1d, 0xdd, 0x0e,
+ 0x09, 0x80, 0x34, 0xfe, 0x1e, 0x53, 0xd8, 0xc2, 0xf5, 0x40, 0xa1, 0xc3, 0x38, 0x9e, 0x6b, 0x4f,
+ 0xbf, 0x6c, 0xe0, 0x05, 0xe1, 0xe5, 0x48, 0x0f, 0xfa, 0x31, 0xd4, 0xc2, 0xa0, 0x2b, 0x7a, 0x98,
+ 0xa0, 0x2b, 0x8a, 0xdd, 0x36, 0x1f, 0xdd, 0xcc, 0x24, 0x6c, 0x5a, 0x65, 0x36, 0x09, 0xe5, 0x5c,
+ 0xf3, 0x25, 0x21, 0x23, 0x83, 0x32, 0x89, 0x35, 0x40, 0x7f, 0xaf, 0x09, 0x4c, 0x3c, 0x40, 0x51,
+ 0x51, 0x92, 0xf4, 0x18, 0x46, 0xdb, 0x7c, 0x7c, 0x0b, 0x97, 0x30, 0xe2, 0x0f, 0x98, 0x11, 0xbf,
+ 0x8b, 0x17, 0x03, 0x23, 0x3c, 0x73, 0x48, 0x3c, 0x5b, 0x58, 0xf1, 0xe5, 0x3d, 0x7c, 0x27, 0xe4,
+ 0x9c, 0x50, 0x6f, 0xb0, 0x58, 0x1c, 0x09, 0x4d, 0x5c, 0xac, 0x10, 0xb2, 0x9a, 0xb8, 0x58, 0x61,
+ 0x18, 0x35, 0x69, 0xb1, 0x38, 0xee, 0x99, 0xb4, 0x58, 0x7e, 0xcf, 0x26, 0xfb, 0x7e, 0x85, 0x7f,
+ 0xb5, 0x8a, 0x6c, 0x28, 0xf9, 0x28, 0x24, 0x5a, 0x4d, 0x42, 0x84, 0x82, 0xbb, 0x44, 0xf3, 0x41,
+ 0x6a, 0xbf, 0x30, 0xe8, 0x3d, 0x66, 0xd0, 0x5d, 0xbc, 0x4c, 0x35, 0x8b, 0x0f, 0x63, 0x5b, 0x1c,
+ 0x76, 0x68, 0x19, 0xfd, 0x3e, 0x75, 0xc4, 0x9f, 0x40, 0x45, 0x85, 0x09, 0xd1, 0x7b, 0x89, 0x28,
+ 0x94, 0x8a, 0x34, 0x36, 0xf1, 0x4d, 0x2c, 0x42, 0xf3, 0x23, 0xa6, 0x79, 0x15, 0xaf, 0x24, 0x68,
+ 0x76, 0x18, 0x6b, 0x48, 0x39, 0x87, 0xf8, 0x92, 0x95, 0x87, 0x10, 0xc4, 0x64, 0xe5, 0x61, 0x84,
+ 0xf0, 0x46, 0xe5, 0x63, 0xc6, 0x4a, 0x95, 0xbb, 0x00, 0x01, 0x98, 0x87, 0x12, 0x7d, 0xa9, 0x5c,
+ 0xa6, 0xa2, 0xc1, 0x21, 0x8e, 0x03, 0x62, 0xcc, 0xd4, 0x8a, 0x7d, 0x17, 0x51, 0x3b, 0x30, 0x5d,
+ 0x1a, 0x24, 0x36, 0xff, 0x3a, 0x0f, 0xe5, 0x57, 0x86, 0x69, 0x79, 0xc4, 0x32, 0xac, 0x1e, 0x41,
+ 0x67, 0x90, 0x63, 0x89, 0x32, 0x1a, 0x07, 0x55, 0x7c, 0x2b, 0x1a, 0x07, 0x43, 0xe0, 0x0f, 0x5e,
+ 0x63, 0x5a, 0x9b, 0x78, 0x89, 0x6a, 0x1d, 0x06, 0xa2, 0x5b, 0x0c, 0xb3, 0xa1, 0x13, 0x7d, 0x03,
+ 0x79, 0xf1, 0x3a, 0x20, 0x22, 0x28, 0x84, 0xe5, 0x34, 0xef, 0x25, 0x77, 0x26, 0x6d, 0x25, 0x55,
+ 0x8d, 0xcb, 0xf8, 0xa8, 0x9e, 0x09, 0x40, 0x00, 0x46, 0x46, 0x1d, 0x1a, 0xc3, 0x2e, 0x9b, 0x6b,
+ 0xe9, 0x0c, 0x42, 0xe7, 0x63, 0xa6, 0xf3, 0x01, 0x6e, 0x46, 0x75, 0xf6, 0x7d, 0x5e, 0xaa, 0xf7,
+ 0x8f, 0x60, 0x76, 0xd7, 0x70, 0x2f, 0x50, 0x24, 0xf5, 0x29, 0x1f, 0x93, 0x34, 0x9b, 0x49, 0x5d,
+ 0x42, 0xcb, 0x03, 0xa6, 0x65, 0x85, 0x47, 0x12, 0x55, 0xcb, 0x85, 0xe1, 0xd2, 0x9c, 0x82, 0xfa,
+ 0x90, 0xe7, 0xdf, 0x96, 0x44, 0xfd, 0x17, 0xfa, 0x3e, 0x25, 0xea, 0xbf, 0xf0, 0xe7, 0x28, 0xb7,
+ 0x6b, 0x19, 0x41, 0x51, 0x7e, 0xcc, 0x81, 0x22, 0x6f, 0xf6, 0x22, 0x1f, 0x7e, 0x34, 0x57, 0xd3,
+ 0xba, 0x85, 0xae, 0x87, 0x4c, 0xd7, 0x7d, 0xdc, 0x88, 0xad, 0x95, 0xe0, 0x7c, 0xae, 0x3d, 0xfd,
+ 0x96, 0x86, 0x7e, 0x0c, 0x10, 0xe0, 0xb7, 0xb1, 0x03, 0x10, 0x85, 0x82, 0x63, 0x07, 0x20, 0x06,
+ 0xfd, 0xe2, 0x0d, 0xa6, 0x77, 0x1d, 0x3f, 0x8c, 0xea, 0xf5, 0x1c, 0xc3, 0x72, 0xdf, 0x10, 0xe7,
+ 0x23, 0x8e, 0xd1, 0xb9, 0x17, 0xe6, 0x88, 0x1e, 0x86, 0x7f, 0x9b, 0x83, 0x59, 0x5a, 0x80, 0xd2,
+ 0x3c, 0x1d, 0xdc, 0xdb, 0xa3, 0x96, 0xc4, 0xd0, 0xb2, 0xa8, 0x25, 0xf1, 0x2b, 0x7f, 0x38, 0x4f,
+ 0xb3, 0x9f, 0x1b, 0x10, 0xc6, 0x40, 0x1d, 0x6d, 0x43, 0x59, 0xb9, 0xd8, 0xa3, 0x04, 0x61, 0x61,
+ 0x18, 0x2e, 0x1a, 0xf9, 0x13, 0x50, 0x01, 0x7c, 0x97, 0xe9, 0x5b, 0xe2, 0x91, 0x9f, 0xe9, 0xeb,
+ 0x73, 0x0e, 0xaa, 0xf0, 0x2d, 0x54, 0xd4, 0xcb, 0x3f, 0x4a, 0x90, 0x17, 0x81, 0xf8, 0xa2, 0x51,
+ 0x2e, 0x09, 0x3b, 0x08, 0x1f, 0x7c, 0xff, 0x27, 0x15, 0x92, 0x8d, 0x2a, 0x1e, 0x40, 0x41, 0xa0,
+ 0x01, 0x49, 0xb3, 0x0c, 0xe3, 0x81, 0x49, 0xb3, 0x8c, 0x40, 0x09, 0xe1, 0xda, 0x8e, 0x69, 0xa4,
+ 0x17, 0x1e, 0x99, 0x49, 0x84, 0xb6, 0x97, 0xc4, 0x4b, 0xd3, 0x16, 0x80, 0x5b, 0x69, 0xda, 0x94,
+ 0xcb, 0x66, 0x9a, 0xb6, 0x73, 0xe2, 0x89, 0xe3, 0x22, 0x2f, 0x71, 0x28, 0x45, 0x98, 0x1a, 0xbd,
+ 0xf1, 0x4d, 0x2c, 0x49, 0xa5, 0x77, 0xa0, 0x50, 0x84, 0x6e, 0x74, 0x05, 0x10, 0x60, 0x15, 0xd1,
+ 0x7a, 0x2a, 0x11, 0xf0, 0x8c, 0xd6, 0x53, 0xc9, 0x70, 0x47, 0x38, 0x34, 0x04, 0x7a, 0x79, 0xe5,
+ 0x4f, 0x35, 0xff, 0x4c, 0x03, 0x14, 0x87, 0x35, 0xd0, 0xb3, 0x64, 0xe9, 0x89, 0x30, 0x6a, 0xf3,
+ 0xc3, 0x77, 0x63, 0x4e, 0x8a, 0xf6, 0x81, 0x49, 0x3d, 0xc6, 0x3d, 0x7a, 0x4b, 0x8d, 0xfa, 0x73,
+ 0x0d, 0xaa, 0x21, 0x4c, 0x04, 0xbd, 0x9f, 0xb2, 0xa6, 0x11, 0x14, 0xb6, 0xf9, 0xe4, 0x56, 0xbe,
+ 0xa4, 0x42, 0x53, 0xd9, 0x01, 0xb2, 0xe2, 0xfe, 0xa9, 0x06, 0xb5, 0x30, 0x86, 0x82, 0x52, 0x64,
+ 0xc7, 0x50, 0xdc, 0xe6, 0xfa, 0xed, 0x8c, 0x37, 0x2f, 0x4f, 0x50, 0x6c, 0x0f, 0xa0, 0x20, 0x50,
+ 0x97, 0xa4, 0x8d, 0x1f, 0xc6, 0x7f, 0x93, 0x36, 0x7e, 0x04, 0xb2, 0x49, 0xd8, 0xf8, 0x8e, 0x3d,
+ 0x20, 0xca, 0x31, 0x13, 0xb0, 0x4c, 0x9a, 0xb6, 0x9b, 0x8f, 0x59, 0x04, 0xd3, 0x49, 0xd3, 0x16,
+ 0x1c, 0x33, 0x89, 0xc7, 0xa0, 0x14, 0x61, 0xb7, 0x1c, 0xb3, 0x28, 0x9c, 0x93, 0x70, 0xcc, 0x98,
+ 0x42, 0xe5, 0x98, 0x05, 0xc8, 0x49, 0xd2, 0x31, 0x8b, 0xc1, 0xd9, 0x49, 0xc7, 0x2c, 0x0e, 0xbe,
+ 0x24, 0xac, 0x23, 0xd3, 0x1b, 0x3a, 0x66, 0x0b, 0x09, 0x20, 0x0b, 0xfa, 0x30, 0xc5, 0x89, 0x89,
+ 0x28, 0x79, 0xf3, 0xa3, 0x77, 0xe4, 0x4e, 0xdd, 0xe3, 0xdc, 0xfd, 0x72, 0x8f, 0xff, 0x8d, 0x06,
+ 0x8b, 0x49, 0x00, 0x0d, 0x4a, 0xd1, 0x93, 0x82, 0xae, 0x37, 0x37, 0xde, 0x95, 0xfd, 0x66, 0x6f,
+ 0xf9, 0xbb, 0xfe, 0x45, 0xfd, 0xdf, 0xbf, 0x5e, 0xd5, 0xfe, 0xf3, 0xeb, 0x55, 0xed, 0x7f, 0xbe,
+ 0x5e, 0xd5, 0xfe, 0xf6, 0x7f, 0x57, 0x67, 0xce, 0xf2, 0xec, 0x87, 0x7a, 0xdf, 0xf9, 0xff, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xc6, 0xc3, 0xa2, 0xb2, 0x2f, 0x38, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto
new file mode 100644
index 0000000..8060ca0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.proto
@@ -0,0 +1,1120 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/mvcc/mvccpb/kv.proto";
+import "etcd/auth/authpb/auth.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service KV {
+ // Range gets the keys in the range from the key-value store.
+ rpc Range(RangeRequest) returns (RangeResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/range"
+ body: "*"
+ };
+ }
+
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ rpc Put(PutRequest) returns (PutResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/put"
+ body: "*"
+ };
+ }
+
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/deleterange"
+ body: "*"
+ };
+ }
+
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ rpc Txn(TxnRequest) returns (TxnResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/txn"
+ body: "*"
+ };
+ }
+
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ rpc Compact(CompactionRequest) returns (CompactionResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/compaction"
+ body: "*"
+ };
+ }
+}
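The KV service above is the RPC surface that the Go clientv3 package wraps. As a minimal, illustrative sketch (the endpoint, key, and value are assumptions, not part of this change), issuing a Put and a single-key Range through clientv3 looks roughly like this:

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	// Connect to one etcd member; the endpoint is illustrative.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Put maps to the Put RPC; it bumps the store revision and emits one event.
	if _, err := cli.Put(ctx, "/demo/key", "v1"); err != nil {
		panic(err)
	}

	// Get maps to the Range RPC for a single key.
	resp, err := cli.Get(ctx, "/demo/key")
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s=%s (mod_revision=%d)\n", kv.Key, kv.Value, kv.ModRevision)
	}
}

The later sketches below reuse this client and its imports.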
+
+service Watch {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
+ option (google.api.http) = {
+ post: "/v3/watch"
+ body: "*"
+ };
+ }
+}
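The Watch service's bidirectional stream is surfaced by clientv3 as a Go channel of WatchResponse values. A minimal sketch, reusing the client from the KV example above (the prefix is illustrative):

// watchPrefix prints events for every key under prefix until ctx is cancelled.
func watchPrefix(ctx context.Context, cli *clientv3.Client, prefix string) {
	// WithPrefix sets range_end to the prefix plus one; WithPrevKV asks the
	// server to attach the previous key-value pair to each event.
	wch := cli.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithPrevKV())
	for wresp := range wch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %s -> %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}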
+
+service Lease {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/grant"
+ body: "*"
+ };
+ }
+
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/revoke"
+ body: "*"
+ additional_bindings {
+ post: "/v3/kv/lease/revoke"
+ body: "*"
+ }
+ };
+ }
+
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/keepalive"
+ body: "*"
+ };
+ }
+
+ // LeaseTimeToLive retrieves lease information.
+ rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/timetolive"
+ body: "*"
+ additional_bindings {
+ post: "/v3/kv/lease/timetolive"
+ body: "*"
+ }
+ };
+ }
+
+ // LeaseLeases lists all existing leases.
+ rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/leases"
+ body: "*"
+ additional_bindings {
+ post: "/v3/kv/lease/leases"
+ body: "*"
+ }
+ };
+ }
+}
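A sketch of the grant/attach/keep-alive cycle these lease RPCs describe, again through clientv3 and reusing the client from the KV example (TTL, key, and value are illustrative):

// keepKeyAlive binds key to a 10-second lease and keeps the lease alive until
// ctx is cancelled; once the keep-alives stop, the key expires with the lease.
func keepKeyAlive(ctx context.Context, cli *clientv3.Client, key, val string) error {
	// LeaseGrant: request a lease with an advisory 10s TTL.
	lease, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	// Attach the key to the lease via the PutRequest lease field.
	if _, err := cli.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil {
		return err
	}
	// LeaseKeepAlive: a stream of keep-alive responses carrying the refreshed TTL.
	ka, err := cli.KeepAlive(ctx, lease.ID)
	if err != nil {
		return err
	}
	for range ka {
		// Drain refreshes until ctx is done and the channel closes.
	}
	return nil
}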
+
+service Cluster {
+ // MemberAdd adds a member into the cluster.
+ rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/add"
+ body: "*"
+ };
+ }
+
+ // MemberRemove removes an existing member from the cluster.
+ rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/remove"
+ body: "*"
+ };
+ }
+
+ // MemberUpdate updates the member configuration.
+ rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/update"
+ body: "*"
+ };
+ }
+
+ // MemberList lists all the members in the cluster.
+ rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/list"
+ body: "*"
+ };
+ }
+}
+
+service Maintenance {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/alarm"
+ body: "*"
+ };
+ }
+
+ // Status gets the status of the member.
+ rpc Status(StatusRequest) returns (StatusResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/status"
+ body: "*"
+ };
+ }
+
+ // Defragment defragments a member's backend database to recover storage space.
+ rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/defragment"
+ body: "*"
+ };
+ }
+
+  // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since Hash operation does not hold MVCC locks.
+ // Use "HashKV" API instead for "key" bucket consistency checks.
+ rpc Hash(HashRequest) returns (HashResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/hash"
+ body: "*"
+ };
+ }
+
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates "key" bucket in backend storage.
+ rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/hash"
+ body: "*"
+ };
+ }
+
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/snapshot"
+ body: "*"
+ };
+ }
+
+  // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+ rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/transfer-leadership"
+ body: "*"
+ };
+ }
+}
+
+service Auth {
+ // AuthEnable enables authentication.
+ rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/enable"
+ body: "*"
+ };
+ }
+
+ // AuthDisable disables authentication.
+ rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/disable"
+ body: "*"
+ };
+ }
+
+ // Authenticate processes an authenticate request.
+ rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/authenticate"
+ body: "*"
+ };
+ }
+
+ // UserAdd adds a new user.
+ rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/add"
+ body: "*"
+ };
+ }
+
+ // UserGet gets detailed user information.
+ rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/get"
+ body: "*"
+ };
+ }
+
+ // UserList gets a list of all users.
+ rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/list"
+ body: "*"
+ };
+ }
+
+ // UserDelete deletes a specified user.
+ rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/delete"
+ body: "*"
+ };
+ }
+
+ // UserChangePassword changes the password of a specified user.
+ rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/changepw"
+ body: "*"
+ };
+ }
+
+  // UserGrantRole grants a role to a specified user.
+ rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/grant"
+ body: "*"
+ };
+ }
+
+  // UserRevokeRole revokes a role of a specified user.
+ rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/revoke"
+ body: "*"
+ };
+ }
+
+ // RoleAdd adds a new role.
+ rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/add"
+ body: "*"
+ };
+ }
+
+ // RoleGet gets detailed role information.
+ rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/get"
+ body: "*"
+ };
+ }
+
+  // RoleList gets a list of all roles.
+ rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/list"
+ body: "*"
+ };
+ }
+
+ // RoleDelete deletes a specified role.
+ rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/delete"
+ body: "*"
+ };
+ }
+
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/grant"
+ body: "*"
+ };
+ }
+
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/revoke"
+ body: "*"
+ };
+ }
+}
+
+message ResponseHeader {
+ // cluster_id is the ID of the cluster which sent the response.
+ uint64 cluster_id = 1;
+ // member_id is the ID of the member which sent the response.
+ uint64 member_id = 2;
+ // revision is the key-value store revision when the request was applied.
+ // For watch progress responses, the header.revision indicates progress. All future events
+  // received in this stream are guaranteed to have a higher revision number than the
+ // header.revision number.
+ int64 revision = 3;
+ // raft_term is the raft term when the request was applied.
+ uint64 raft_term = 4;
+}
+
+message RangeRequest {
+ enum SortOrder {
+ NONE = 0; // default, no sorting
+ ASCEND = 1; // lowest target value first
+ DESCEND = 2; // highest target value first
+ }
+ enum SortTarget {
+ KEY = 0;
+ VERSION = 1;
+ CREATE = 2;
+ MOD = 3;
+ VALUE = 4;
+ }
+
+ // key is the first key for the range. If range_end is not given, the request only looks up key.
+ bytes key = 1;
+ // range_end is the upper bound on the requested range [key, range_end).
+ // If range_end is '\0', the range is all keys >= key.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ // then the range request gets all keys prefixed with key.
+ // If both key and range_end are '\0', then the range request returns all keys.
+ bytes range_end = 2;
+ // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+ // it is treated as no limit.
+ int64 limit = 3;
+ // revision is the point-in-time of the key-value store to use for the range.
+ // If revision is less or equal to zero, the range is over the newest key-value store.
+ // If the revision has been compacted, ErrCompacted is returned as a response.
+ int64 revision = 4;
+
+ // sort_order is the order for returned sorted results.
+ SortOrder sort_order = 5;
+
+ // sort_target is the key-value field to use for sorting.
+ SortTarget sort_target = 6;
+
+ // serializable sets the range request to use serializable member-local reads.
+ // Range requests are linearizable by default; linearizable requests have higher
+ // latency and lower throughput than serializable requests but reflect the current
+ // consensus of the cluster. For better performance, in exchange for possible stale reads,
+ // a serializable range request is served locally without needing to reach consensus
+ // with other nodes in the cluster.
+ bool serializable = 7;
+
+ // keys_only when set returns only the keys and not the values.
+ bool keys_only = 8;
+
+ // count_only when set returns only the count of the keys in the range.
+ bool count_only = 9;
+
+ // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+ // lesser mod revisions will be filtered away.
+ int64 min_mod_revision = 10;
+
+ // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+ // greater mod revisions will be filtered away.
+ int64 max_mod_revision = 11;
+
+ // min_create_revision is the lower bound for returned key create revisions; all keys with
+ // lesser create revisions will be filtered away.
+ int64 min_create_revision = 12;
+
+ // max_create_revision is the upper bound for returned key create revisions; all keys with
+ // greater create revisions will be filtered away.
+ int64 max_create_revision = 13;
+}
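The "key plus one" convention used for prefix ranges above can be computed directly when building raw RangeRequests; clientv3's WithPrefix option does the equivalent internally. An illustrative helper implementing the stated rule:

// prefixRangeEnd returns the range_end that pairs with key to cover every key
// sharing the given prefix: the last byte below 0xff is incremented and the
// rest truncated (e.g. "aa" -> "ab", "a\xff" -> "b"). If every byte is 0xff,
// "\x00" is returned, which RangeRequest reads as "all keys >= key".
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}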
+
+message RangeResponse {
+ ResponseHeader header = 1;
+ // kvs is the list of key-value pairs matched by the range request.
+ // kvs is empty when count is requested.
+ repeated mvccpb.KeyValue kvs = 2;
+ // more indicates if there are more keys to return in the requested range.
+ bool more = 3;
+ // count is set to the number of keys within the range when requested.
+ int64 count = 4;
+}
+
+message PutRequest {
+ // key is the key, in bytes, to put into the key-value store.
+ bytes key = 1;
+ // value is the value, in bytes, to associate with the key in the key-value store.
+ bytes value = 2;
+ // lease is the lease ID to associate with the key in the key-value store. A lease
+ // value of 0 indicates no lease.
+ int64 lease = 3;
+
+ // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+ // The previous key-value pair will be returned in the put response.
+ bool prev_kv = 4;
+
+ // If ignore_value is set, etcd updates the key using its current value.
+ // Returns an error if the key does not exist.
+ bool ignore_value = 5;
+
+ // If ignore_lease is set, etcd updates the key using its current lease.
+ // Returns an error if the key does not exist.
+ bool ignore_lease = 6;
+}
+
+message PutResponse {
+ ResponseHeader header = 1;
+ // if prev_kv is set in the request, the previous key-value pair will be returned.
+ mvccpb.KeyValue prev_kv = 2;
+}
+
+message DeleteRangeRequest {
+ // key is the first key to delete in the range.
+ bytes key = 1;
+ // range_end is the key following the last key to delete for the range [key, range_end).
+ // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is the given key plus one (e.g., "aa"+1 == "ab"), then the range is all the keys
+ // with the prefix (the given key).
+ // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+ bytes range_end = 2;
+
+  // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+ // The previous key-value pairs will be returned in the delete response.
+ bool prev_kv = 3;
+}
+
+message DeleteRangeResponse {
+ ResponseHeader header = 1;
+ // deleted is the number of keys deleted by the delete range request.
+ int64 deleted = 2;
+ // if prev_kv is set in the request, the previous key-value pairs will be returned.
+ repeated mvccpb.KeyValue prev_kvs = 3;
+}
+
+message RequestOp {
+ // request is a union of request types accepted by a transaction.
+ oneof request {
+ RangeRequest request_range = 1;
+ PutRequest request_put = 2;
+ DeleteRangeRequest request_delete_range = 3;
+ TxnRequest request_txn = 4;
+ }
+}
+
+message ResponseOp {
+ // response is a union of response types returned by a transaction.
+ oneof response {
+ RangeResponse response_range = 1;
+ PutResponse response_put = 2;
+ DeleteRangeResponse response_delete_range = 3;
+ TxnResponse response_txn = 4;
+ }
+}
+
+message Compare {
+ enum CompareResult {
+ EQUAL = 0;
+ GREATER = 1;
+ LESS = 2;
+ NOT_EQUAL = 3;
+ }
+ enum CompareTarget {
+ VERSION = 0;
+ CREATE = 1;
+ MOD = 2;
+ VALUE = 3;
+ LEASE = 4;
+ }
+  // result is the logical comparison operation for this comparison.
+ CompareResult result = 1;
+ // target is the key-value field to inspect for the comparison.
+ CompareTarget target = 2;
+ // key is the subject key for the comparison operation.
+ bytes key = 3;
+ oneof target_union {
+ // version is the version of the given key
+ int64 version = 4;
+ // create_revision is the creation revision of the given key
+ int64 create_revision = 5;
+ // mod_revision is the last modified revision of the given key.
+ int64 mod_revision = 6;
+ // value is the value of the given key, in bytes.
+ bytes value = 7;
+ // lease is the lease id of the given key.
+ int64 lease = 8;
+ // leave room for more target_union field tags, jump to 64
+ }
+
+ // range_end compares the given target to all keys in the range [key, range_end).
+ // See RangeRequest for more details on key ranges.
+ bytes range_end = 64;
+ // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+message TxnRequest {
+ // compare is a list of predicates representing a conjunction of terms.
+ // If the comparisons succeed, then the success requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ // If the comparisons fail, then the failure requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ repeated Compare compare = 1;
+ // success is a list of requests which will be applied when compare evaluates to true.
+ repeated RequestOp success = 2;
+ // failure is a list of requests which will be applied when compare evaluates to false.
+ repeated RequestOp failure = 3;
+}
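The guard/t_op/f_op shape above maps directly onto clientv3's Txn builder. A sketch of a compare-and-swap update that only writes if the key's mod_revision still matches a previously read value (the names and the revision bookkeeping are illustrative):

// casPut writes newVal only if key has not changed since readModRev was
// observed; it reports whether the guard held.
func casPut(ctx context.Context, cli *clientv3.Client, key, newVal string, readModRev int64) (bool, error) {
	resp, err := cli.Txn(ctx).
		// compare (guard): the mod_revision must still equal what we read.
		If(clientv3.Compare(clientv3.ModRevision(key), "=", readModRev)).
		// success (t_op): applied when every compare evaluates to true.
		Then(clientv3.OpPut(key, newVal)).
		// failure (f_op): fetch the current value for the caller instead.
		Else(clientv3.OpGet(key)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}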
+
+message TxnResponse {
+ ResponseHeader header = 1;
+  // succeeded is set to true if the compare evaluated to true, and false otherwise.
+ bool succeeded = 2;
+ // responses is a list of responses corresponding to the results from applying
+ // success if succeeded is true or failure if succeeded is false.
+ repeated ResponseOp responses = 3;
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+ // revision is the key-value store revision for the compaction operation.
+ int64 revision = 1;
+ // physical is set so the RPC will wait until the compaction is physically
+ // applied to the local database such that compacted entries are totally
+ // removed from the backend database.
+ bool physical = 2;
+}
+
+message CompactionResponse {
+ ResponseHeader header = 1;
+}
+
+message HashRequest {
+}
+
+message HashKVRequest {
+ // revision is the key-value store revision for the hash operation.
+ int64 revision = 1;
+}
+
+message HashKVResponse {
+ ResponseHeader header = 1;
+ // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+ uint32 hash = 2;
+ // compact_revision is the compacted revision of key-value store when hash begins.
+ int64 compact_revision = 3;
+}
+
+message HashResponse {
+ ResponseHeader header = 1;
+ // hash is the hash value computed from the responding member's KV's backend.
+ uint32 hash = 2;
+}
+
+message SnapshotRequest {
+}
+
+message SnapshotResponse {
+ // header has the current key-value store information. The first header in the snapshot
+ // stream indicates the point in time of the snapshot.
+ ResponseHeader header = 1;
+
+ // remaining_bytes is the number of blob bytes to be sent after this message
+ uint64 remaining_bytes = 2;
+
+ // blob contains the next chunk of the snapshot in the snapshot stream.
+ bytes blob = 3;
+}
+
+message WatchRequest {
+ // request_union is a request to either create a new watcher or cancel an existing watcher.
+ oneof request_union {
+ WatchCreateRequest create_request = 1;
+ WatchCancelRequest cancel_request = 2;
+ WatchProgressRequest progress_request = 3;
+ }
+}
+
+message WatchCreateRequest {
+ // key is the key to register for watching.
+ bytes key = 1;
+
+ // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+ // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+ // or equal to the key argument are watched.
+  // If the range_end is the given key plus one (its immediate lexicographic successor),
+ // then all keys with the prefix (the given key) will be watched.
+ bytes range_end = 2;
+
+ // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+ int64 start_revision = 3;
+
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ bool progress_notify = 4;
+
+ enum FilterType {
+ // filter out put event.
+ NOPUT = 0;
+ // filter out delete event.
+ NODELETE = 1;
+ }
+
+  // filters filter the events on the server side before they are sent back to the watcher.
+ repeated FilterType filters = 5;
+
+  // If prev_kv is set, the created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ bool prev_kv = 6;
+
+ // If watch_id is provided and non-zero, it will be assigned to this watcher.
+ // Since creating a watcher in etcd is not a synchronous operation,
+  // this can be used to ensure that ordering is correct when creating multiple
+ // watchers on the same stream. Creating a watcher with an ID already in
+ // use on the stream will cause an error to be returned.
+ int64 watch_id = 7;
+
+ // fragment enables splitting large revisions into multiple watch responses.
+ bool fragment = 8;
+}
+
+message WatchCancelRequest {
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ int64 watch_id = 1;
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+}
+
+message WatchResponse {
+ ResponseHeader header = 1;
+ // watch_id is the ID of the watcher that corresponds to the response.
+ int64 watch_id = 2;
+
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will attach with the same watch_id.
+ bool created = 3;
+
+ // canceled is set to true if the response is for a cancel watch request.
+ // No further events will be sent to the canceled watcher.
+ bool canceled = 4;
+
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+ // This happens when creating a watcher at a compacted revision or the watcher cannot
+ // catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ int64 compact_revision = 5;
+
+ // cancel_reason indicates the reason for canceling the watcher.
+ string cancel_reason = 6;
+
+  // fragment is true if a large watch response was split over multiple responses.
+ bool fragment = 7;
+
+ repeated mvccpb.Event events = 11;
+}
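Since compact_revision marks the watcher as canceled, a client normally re-reads the key and re-creates the watch from a fresh revision rather than retrying the compacted start_revision. A simplified sketch of that recovery loop with clientv3 (error handling is pared down):

// watchWithRecovery restarts the watch when the requested revision has been
// compacted, resuming from the revision reported by a fresh Get.
func watchWithRecovery(ctx context.Context, cli *clientv3.Client, key string) {
	rev := int64(0) // 0 means "start from now"
	for ctx.Err() == nil {
		wch := cli.Watch(ctx, key, clientv3.WithRev(rev))
		for wresp := range wch {
			if wresp.CompactRevision != 0 {
				// Our start revision was compacted: re-sync, then re-watch.
				get, err := cli.Get(ctx, key)
				if err != nil {
					return
				}
				rev = get.Header.Revision + 1
				break
			}
			for _, ev := range wresp.Events {
				rev = ev.Kv.ModRevision + 1
				fmt.Printf("%s %s\n", ev.Type, ev.Kv.Key)
			}
		}
	}
}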
+
+message LeaseGrantRequest {
+  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+ int64 TTL = 1;
+ // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+ int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+ ResponseHeader header = 1;
+ // ID is the lease ID for the granted lease.
+ int64 ID = 2;
+ // TTL is the server chosen lease time-to-live in seconds.
+ int64 TTL = 3;
+ string error = 4;
+}
+
+message LeaseRevokeRequest {
+ // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+ int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+ ResponseHeader header = 1;
+}
+
+message LeaseCheckpoint {
+ // ID is the lease ID to checkpoint.
+ int64 ID = 1;
+
+ // Remaining_TTL is the remaining time until expiry of the lease.
+ int64 remaining_TTL = 2;
+}
+
+message LeaseCheckpointRequest {
+ repeated LeaseCheckpoint checkpoints = 1;
+}
+
+message LeaseCheckpointResponse {
+ ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+ // ID is the lease ID for the lease to keep alive.
+ int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+ ResponseHeader header = 1;
+ // ID is the lease ID from the keep alive request.
+ int64 ID = 2;
+ // TTL is the new time-to-live for the lease.
+ int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+ // ID is the lease ID for the lease.
+ int64 ID = 1;
+ // keys is true to query all the keys attached to this lease.
+ bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+ ResponseHeader header = 1;
+ // ID is the lease ID from the keep alive request.
+ int64 ID = 2;
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+ int64 TTL = 3;
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ int64 grantedTTL = 4;
+ // Keys is the list of keys attached to this lease.
+ repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+}
+
+message LeaseStatus {
+ int64 ID = 1;
+ // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+ ResponseHeader header = 1;
+ repeated LeaseStatus leases = 2;
+}
+
+message Member {
+ // ID is the member ID for this member.
+ uint64 ID = 1;
+ // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+ string name = 2;
+ // peerURLs is the list of URLs the member exposes to the cluster for communication.
+ repeated string peerURLs = 3;
+ // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+ repeated string clientURLs = 4;
+}
+
+message MemberAddRequest {
+ // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+ repeated string peerURLs = 1;
+}
+
+message MemberAddResponse {
+ ResponseHeader header = 1;
+ // member is the member information for the added member.
+ Member member = 2;
+ // members is a list of all members after adding the new member.
+ repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+ // ID is the member ID of the member to remove.
+ uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+ ResponseHeader header = 1;
+ // members is a list of all members after removing the member.
+ repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+ // ID is the member ID of the member to update.
+ uint64 ID = 1;
+ // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+ repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse{
+ ResponseHeader header = 1;
+ // members is a list of all members after updating the member.
+ repeated Member members = 2;
+}
+
+message MemberListRequest {
+}
+
+message MemberListResponse {
+ ResponseHeader header = 1;
+ // members is a list of all members associated with the cluster.
+ repeated Member members = 2;
+}
+
+message DefragmentRequest {
+}
+
+message DefragmentResponse {
+ ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+ // targetID is the node ID for the new leader.
+ uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+ ResponseHeader header = 1;
+}
+
+enum AlarmType {
+ NONE = 0; // default, used to query if any alarm is active
+ NOSPACE = 1; // space quota is exhausted
+ CORRUPT = 2; // kv store corruption detected
+}
+
+message AlarmRequest {
+ enum AlarmAction {
+ GET = 0;
+ ACTIVATE = 1;
+ DEACTIVATE = 2;
+ }
+ // action is the kind of alarm request to issue. The action
+ // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+ // raised alarm.
+ AlarmAction action = 1;
+ // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+ // alarm request covers all members.
+ uint64 memberID = 2;
+ // alarm is the type of alarm to consider for this request.
+ AlarmType alarm = 3;
+}
+
+message AlarmMember {
+ // memberID is the ID of the member associated with the raised alarm.
+ uint64 memberID = 1;
+ // alarm is the type of alarm which has been raised.
+ AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+ ResponseHeader header = 1;
+ // alarms is a list of alarms associated with the alarm request.
+ repeated AlarmMember alarms = 2;
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+ ResponseHeader header = 1;
+ // version is the cluster protocol version used by the responding member.
+ string version = 2;
+ // dbSize is the size of the backend database physically allocated, in bytes, of the responding member.
+ int64 dbSize = 3;
+ // leader is the member ID which the responding member believes is the current leader.
+ uint64 leader = 4;
+ // raftIndex is the current raft committed index of the responding member.
+ uint64 raftIndex = 5;
+ // raftTerm is the current raft term of the responding member.
+ uint64 raftTerm = 6;
+ // raftAppliedIndex is the current raft applied index of the responding member.
+ uint64 raftAppliedIndex = 7;
+ // errors contains alarm/health information and status.
+ repeated string errors = 8;
+ // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
+ int64 dbSizeInUse = 9;
+}
+
+message AuthEnableRequest {
+}
+
+message AuthDisableRequest {
+}
+
+message AuthenticateRequest {
+ string name = 1;
+ string password = 2;
+}
+
+message AuthUserAddRequest {
+ string name = 1;
+ string password = 2;
+}
+
+message AuthUserGetRequest {
+ string name = 1;
+}
+
+message AuthUserDeleteRequest {
+ // name is the name of the user to delete.
+ string name = 1;
+}
+
+message AuthUserChangePasswordRequest {
+ // name is the name of the user whose password is being changed.
+ string name = 1;
+ // password is the new password for the user.
+ string password = 2;
+}
+
+message AuthUserGrantRoleRequest {
+ // user is the name of the user which should be granted a given role.
+ string user = 1;
+ // role is the name of the role to grant to the user.
+ string role = 2;
+}
+
+message AuthUserRevokeRoleRequest {
+ string name = 1;
+ string role = 2;
+}
+
+message AuthRoleAddRequest {
+ // name is the name of the role to add to the authentication system.
+ string name = 1;
+}
+
+message AuthRoleGetRequest {
+ string role = 1;
+}
+
+message AuthUserListRequest {
+}
+
+message AuthRoleListRequest {
+}
+
+message AuthRoleDeleteRequest {
+ string role = 1;
+}
+
+message AuthRoleGrantPermissionRequest {
+ // name is the name of the role which will be granted the permission.
+ string name = 1;
+ // perm is the permission to grant to the role.
+ authpb.Permission perm = 2;
+}
+
+message AuthRoleRevokePermissionRequest {
+ string role = 1;
+ bytes key = 2;
+ bytes range_end = 3;
+}
+
+message AuthEnableResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthDisableResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthenticateResponse {
+ ResponseHeader header = 1;
+ // token is an authorized token that can be used in succeeding RPCs
+ string token = 2;
+}
+
+message AuthUserAddResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthUserGetResponse {
+ ResponseHeader header = 1;
+
+ repeated string roles = 2;
+}
+
+message AuthUserDeleteResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthUserChangePasswordResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthUserGrantRoleResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthUserRevokeRoleResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthRoleAddResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthRoleGetResponse {
+ ResponseHeader header = 1;
+
+ repeated authpb.Permission perm = 2;
+}
+
+message AuthRoleListResponse {
+ ResponseHeader header = 1;
+
+ repeated string roles = 2;
+}
+
+message AuthUserListResponse {
+ ResponseHeader header = 1;
+
+ repeated string users = 2;
+}
+
+message AuthRoleDeleteResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthRoleGrantPermissionResponse {
+ ResponseHeader header = 1;
+}
+
+message AuthRoleRevokePermissionResponse {
+ ResponseHeader header = 1;
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go
new file mode 100644
index 0000000..23fe337
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.pb.go
@@ -0,0 +1,718 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kv.proto
+
+/*
+ Package mvccpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ kv.proto
+
+ It has these top-level messages:
+ KeyValue
+ Event
+*/
+package mvccpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Event_EventType int32
+
+const (
+ PUT Event_EventType = 0
+ DELETE Event_EventType = 1
+)
+
+var Event_EventType_name = map[int32]string{
+ 0: "PUT",
+ 1: "DELETE",
+}
+var Event_EventType_value = map[string]int32{
+ "PUT": 0,
+ "DELETE": 1,
+}
+
+func (x Event_EventType) String() string {
+ return proto.EnumName(Event_EventType_name, int32(x))
+}
+func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
+
+type KeyValue struct {
+ // key is the key in bytes. An empty key is not allowed.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // create_revision is the revision of last creation on this key.
+ CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
+ // mod_revision is the revision of last modification on this key.
+ ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
+ // version is the version of the key. A deletion resets
+ // the version to zero and any modification of the key
+ // increases its version.
+ Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+ // value is the value held by the key, in bytes.
+ Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+ // lease is the ID of the lease attached to the key.
+ // When the attached lease expires, the key will be deleted.
+ // If lease is 0, then no lease is attached to the key.
+ Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
+}
+
+func (m *KeyValue) Reset() { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage() {}
+func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
+
+type Event struct {
+ // type is the kind of event. If type is a PUT, it indicates
+ // new data has been stored to the key. If type is a DELETE,
+ // it indicates the key was deleted.
+ Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
+ // kv holds the KeyValue for the event.
+ // A PUT event contains current kv pair.
+ // A PUT event with kv.Version=1 indicates the creation of a key.
+ // A DELETE/EXPIRE event contains the deleted key with
+ // its modification revision set to the revision of deletion.
+ Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
+ // prev_kv holds the key-value pair before the event happens.
+ PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
+}
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
+
+func init() {
+ proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
+ proto.RegisterType((*Event)(nil), "mvccpb.Event")
+ proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
+}
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if m.CreateRevision != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
+ }
+ if m.ModRevision != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
+ }
+ if m.Version != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Version))
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Lease))
+ }
+ return i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Type != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Type))
+ }
+ if m.Kv != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.Kv.Size()))
+ n1, err := m.Kv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.PrevKv != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintKv(dAtA, i, uint64(m.PrevKv.Size()))
+ n2, err := m.PrevKv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *KeyValue) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.CreateRevision != 0 {
+ n += 1 + sovKv(uint64(m.CreateRevision))
+ }
+ if m.ModRevision != 0 {
+ n += 1 + sovKv(uint64(m.ModRevision))
+ }
+ if m.Version != 0 {
+ n += 1 + sovKv(uint64(m.Version))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovKv(uint64(m.Lease))
+ }
+ return n
+}
+
+func (m *Event) Size() (n int) {
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovKv(uint64(m.Type))
+ }
+ if m.Kv != nil {
+ l = m.Kv.Size()
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.PrevKv != nil {
+ l = m.PrevKv.Size()
+ n += 1 + l + sovKv(uint64(l))
+ }
+ return n
+}
+
+func sovKv(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozKv(x uint64) (n int) {
+ return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+ }
+ m.CreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CreateRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+ }
+ m.ModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ModRevision |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ m.Version = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Version |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipKv(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthKv
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Event: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (Event_EventType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kv == nil {
+ m.Kv = &KeyValue{}
+ }
+ if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrevKv == nil {
+ m.PrevKv = &KeyValue{}
+ }
+ if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipKv(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthKv
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipKv(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthKv
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipKv(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) }
+
+var fileDescriptorKv = []byte{
+ // 303 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
+ 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
+ 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
+ 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
+ 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
+ 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
+ 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
+ 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
+ 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
+ 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
+ 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
+ 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
+ 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
+ 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
+ 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
+ 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
+ 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
+ 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
+ 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto
new file mode 100644
index 0000000..23c911b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/mvccpb/kv.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+package mvccpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message KeyValue {
+ // key is the key in bytes. An empty key is not allowed.
+ bytes key = 1;
+ // create_revision is the revision of last creation on this key.
+ int64 create_revision = 2;
+ // mod_revision is the revision of last modification on this key.
+ int64 mod_revision = 3;
+ // version is the version of the key. A deletion resets
+ // the version to zero and any modification of the key
+ // increases its version.
+ int64 version = 4;
+ // value is the value held by the key, in bytes.
+ bytes value = 5;
+ // lease is the ID of the lease attached to the key.
+ // When the attached lease expires, the key will be deleted.
+ // If lease is 0, then no lease is attached to the key.
+ int64 lease = 6;
+}
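+
+// Editorial illustration (not part of the upstream proto): creating key "foo"
+// at cluster revision 10 yields create_revision=10, mod_revision=10 and
+// version=1; a second put at revision 12 keeps create_revision=10 and sets
+// mod_revision=12 and version=2, while a later deletion resets version to 0.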
+
+message Event {
+ enum EventType {
+ PUT = 0;
+ DELETE = 1;
+ }
+ // type is the kind of event. If type is a PUT, it indicates
+ // new data has been stored to the key. If type is a DELETE,
+ // it indicates the key was deleted.
+ EventType type = 1;
+ // kv holds the KeyValue for the event.
+ // A PUT event contains current kv pair.
+ // A PUT event with kv.Version=1 indicates the creation of a key.
+ // A DELETE/EXPIRE event contains the deleted key with
+ // its modification revision set to the revision of deletion.
+ KeyValue kv = 2;
+
+ // prev_kv holds the key-value pair before the event happens.
+ KeyValue prev_kv = 3;
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go
new file mode 100644
index 0000000..81b0a9d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/discard_logger.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "log"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+// assert that "discardLogger" satisfies the "Logger" interface
+var _ Logger = &discardLogger{}
+
+// NewDiscardLogger returns a new Logger that discards everything except "fatal".
+func NewDiscardLogger() Logger { return &discardLogger{} }
+
+type discardLogger struct{}
+
+func (l *discardLogger) Info(args ...interface{}) {}
+func (l *discardLogger) Infoln(args ...interface{}) {}
+func (l *discardLogger) Infof(format string, args ...interface{}) {}
+func (l *discardLogger) Warning(args ...interface{}) {}
+func (l *discardLogger) Warningln(args ...interface{}) {}
+func (l *discardLogger) Warningf(format string, args ...interface{}) {}
+func (l *discardLogger) Error(args ...interface{}) {}
+func (l *discardLogger) Errorln(args ...interface{}) {}
+func (l *discardLogger) Errorf(format string, args ...interface{}) {}
+func (l *discardLogger) Fatal(args ...interface{}) { log.Fatal(args...) }
+func (l *discardLogger) Fatalln(args ...interface{}) { log.Fatalln(args...) }
+func (l *discardLogger) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) }
+func (l *discardLogger) V(lvl int) bool {
+ return false
+}
+func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/doc.go b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go
new file mode 100644
index 0000000..e919f24
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logutil includes utilities to facilitate logging.
+package logutil
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go
new file mode 100644
index 0000000..e7da80e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/logger.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import "google.golang.org/grpc/grpclog"
+
+// Logger defines logging interface.
+// TODO: deprecate in v3.5.
+type Logger interface {
+ grpclog.LoggerV2
+
+ // Lvl returns the logger itself if the logger's verbosity level >= "lvl".
+ // Otherwise, it returns a logger that discards everything.
+ Lvl(lvl int) grpclog.LoggerV2
+}
+
+// assert that "defaultLogger" satisfies the "Logger" interface
+var _ Logger = &defaultLogger{}
+
+// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface.
+//
+// For example:
+//
+// var defaultLogger Logger
+// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
+// defaultLogger = NewLogger(g)
+//
+func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
+
+type defaultLogger struct {
+ g grpclog.LoggerV2
+}
+
+func (l *defaultLogger) Info(args ...interface{}) { l.g.Info(args...) }
+func (l *defaultLogger) Infoln(args ...interface{}) { l.g.Info(args...) }
+func (l *defaultLogger) Infof(format string, args ...interface{}) { l.g.Infof(format, args...) }
+func (l *defaultLogger) Warning(args ...interface{}) { l.g.Warning(args...) }
+func (l *defaultLogger) Warningln(args ...interface{}) { l.g.Warning(args...) }
+func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
+func (l *defaultLogger) Error(args ...interface{}) { l.g.Error(args...) }
+func (l *defaultLogger) Errorln(args ...interface{}) { l.g.Error(args...) }
+func (l *defaultLogger) Errorf(format string, args ...interface{}) { l.g.Errorf(format, args...) }
+func (l *defaultLogger) Fatal(args ...interface{}) { l.g.Fatal(args...) }
+func (l *defaultLogger) Fatalln(args ...interface{}) { l.g.Fatal(args...) }
+func (l *defaultLogger) Fatalf(format string, args ...interface{}) { l.g.Fatalf(format, args...) }
+func (l *defaultLogger) V(lvl int) bool { return l.g.V(lvl) }
+func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
+ if l.g.V(lvl) {
+ return l
+ }
+ return &discardLogger{}
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go
new file mode 100644
index 0000000..866b6f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/merge_logger.go
@@ -0,0 +1,194 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+var (
+ defaultMergePeriod = time.Second
+ defaultTimeOutputScale = 10 * time.Millisecond
+
+ outputInterval = time.Second
+)
+
+// line represents a log line that can be printed out
+// through capnslog.PackageLogger.
+type line struct {
+ level capnslog.LogLevel
+ str string
+}
+
+func (l line) append(s string) line {
+ return line{
+ level: l.level,
+ str: l.str + " " + s,
+ }
+}
+
+// status represents the merge status of a line.
+type status struct {
+ period time.Duration
+
+ start time.Time // start time of latest merge period
+ count int // number of merged lines from starting
+}
+
+func (s *status) isInMergePeriod(now time.Time) bool {
+ return s.period == 0 || s.start.Add(s.period).After(now)
+}
+
+func (s *status) isEmpty() bool { return s.count == 0 }
+
+func (s *status) summary(now time.Time) string {
+ ts := s.start.Round(defaultTimeOutputScale)
+ took := now.Round(defaultTimeOutputScale).Sub(ts)
+ return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
+}
+
+func (s *status) reset(now time.Time) {
+ s.start = now
+ s.count = 0
+}
+
+// MergeLogger supports merge logging, which merges repeated log lines
+// and prints summary log lines instead.
+//
+// For merge logging, MergeLogger prints out a line the first time it appears.
+// Repeats of the same log line within defaultMergePeriod are held back and
+// summarized in a single log line at the end of defaultMergePeriod.
+// Merging stops once the line no longer appears within
+// defaultMergePeriod.
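+//
+// Illustrative usage (an editorial sketch, not part of the upstream file;
+// "plog" stands in for any existing *capnslog.PackageLogger):
+//
+// ml := NewMergeLogger(plog)
+// for i := 0; i < 100; i++ {
+//  ml.MergeInfo("dial tcp 10.0.0.1:2380: connection refused")
+// }
+//
+// The first call prints the line immediately; identical lines within
+// defaultMergePeriod are only counted and are later emitted as a single
+// "[merged N repeated lines in ...]" summary by the output loop.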
+type MergeLogger struct {
+ *capnslog.PackageLogger
+
+ mu sync.Mutex // protect statusm
+ statusm map[line]*status
+}
+
+func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
+ l := &MergeLogger{
+ PackageLogger: logger,
+ statusm: make(map[line]*status),
+ }
+ go l.outputLoop()
+ return l
+}
+
+func (l *MergeLogger) MergeInfo(entries ...interface{}) {
+ l.merge(line{
+ level: capnslog.INFO,
+ str: fmt.Sprint(entries...),
+ })
+}
+
+func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
+ l.merge(line{
+ level: capnslog.INFO,
+ str: fmt.Sprintf(format, args...),
+ })
+}
+
+func (l *MergeLogger) MergeNotice(entries ...interface{}) {
+ l.merge(line{
+ level: capnslog.NOTICE,
+ str: fmt.Sprint(entries...),
+ })
+}
+
+func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
+ l.merge(line{
+ level: capnslog.NOTICE,
+ str: fmt.Sprintf(format, args...),
+ })
+}
+
+func (l *MergeLogger) MergeWarning(entries ...interface{}) {
+ l.merge(line{
+ level: capnslog.WARNING,
+ str: fmt.Sprint(entries...),
+ })
+}
+
+func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
+ l.merge(line{
+ level: capnslog.WARNING,
+ str: fmt.Sprintf(format, args...),
+ })
+}
+
+func (l *MergeLogger) MergeError(entries ...interface{}) {
+ l.merge(line{
+ level: capnslog.ERROR,
+ str: fmt.Sprint(entries...),
+ })
+}
+
+func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
+ l.merge(line{
+ level: capnslog.ERROR,
+ str: fmt.Sprintf(format, args...),
+ })
+}
+
+func (l *MergeLogger) merge(ln line) {
+ l.mu.Lock()
+
+ // increase count if the logger is merging the line
+ if status, ok := l.statusm[ln]; ok {
+ status.count++
+ l.mu.Unlock()
+ return
+ }
+
+ // initialize status of the line
+ l.statusm[ln] = &status{
+ period: defaultMergePeriod,
+ start: time.Now(),
+ }
+ // release the lock before IO operation
+ l.mu.Unlock()
+ // print out the line the first time it appears
+ l.PackageLogger.Logf(ln.level, ln.str)
+}
+
+func (l *MergeLogger) outputLoop() {
+ for now := range time.Tick(outputInterval) {
+ var outputs []line
+
+ l.mu.Lock()
+ for ln, status := range l.statusm {
+ if status.isInMergePeriod(now) {
+ continue
+ }
+ if status.isEmpty() {
+ delete(l.statusm, ln)
+ continue
+ }
+ outputs = append(outputs, ln.append(status.summary(now)))
+ status.reset(now)
+ }
+ l.mu.Unlock()
+
+ for _, o := range outputs {
+ l.PackageLogger.Logf(o.level, o.str)
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go
new file mode 100644
index 0000000..729cbdb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/package_logger.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "github.com/coreos/pkg/capnslog"
+ "google.golang.org/grpc/grpclog"
+)
+
+// assert that "packageLogger" satisfies the "Logger" interface
+var _ Logger = &packageLogger{}
+
+// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface.
+//
+// For example:
+//
+// var defaultLogger Logger
+// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot")
+//
+func NewPackageLogger(repo, pkg string) Logger {
+ return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
+}
+
+type packageLogger struct {
+ p *capnslog.PackageLogger
+}
+
+func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) }
+func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) }
+func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) }
+func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) }
+func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) }
+func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
+func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) }
+func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) }
+func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) }
+func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) }
+func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) }
+func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) }
+func (l *packageLogger) V(lvl int) bool {
+ return l.p.LevelAt(capnslog.LogLevel(lvl))
+}
+func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
+ if l.p.LevelAt(capnslog.LogLevel(lvl)) {
+ return l
+ }
+ return &discardLogger{}
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go
new file mode 100644
index 0000000..3f48d81
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_grpc.go
@@ -0,0 +1,111 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "google.golang.org/grpc/grpclog"
+)
+
+// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
+// It discards all INFO level logging in gRPC, if debug level
+// is not enabled in "*zap.Logger".
+func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
+ lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+ if err != nil {
+ return nil, err
+ }
+ return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
+// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
+// if debug level is not enabled in "*zap.Logger".
+func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
+ // "AddCallerSkip" to annotate caller outside of "logutil"
+ lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+ return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+type zapGRPCLogger struct {
+ lg *zap.Logger
+ sugar *zap.SugaredLogger
+}
+
+func (zl *zapGRPCLogger) Info(args ...interface{}) {
+ if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+ return
+ }
+ zl.sugar.Info(args...)
+}
+
+func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
+ if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+ return
+ }
+ zl.sugar.Info(args...)
+}
+
+func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
+ if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+ return
+ }
+ zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapGRPCLogger) Warning(args ...interface{}) {
+ zl.sugar.Warn(args...)
+}
+
+func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
+ zl.sugar.Warn(args...)
+}
+
+func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
+ zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapGRPCLogger) Error(args ...interface{}) {
+ zl.sugar.Error(args...)
+}
+
+func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
+ zl.sugar.Error(args...)
+}
+
+func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
+ zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
+ zl.sugar.Fatal(args...)
+}
+
+func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
+ zl.sugar.Fatal(args...)
+}
+
+func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
+ zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapGRPCLogger) V(l int) bool {
+ // infoLog == 0
+ if l <= 0 { // debug level, then we ignore info level in gRPC
+ return !zl.lg.Core().Enabled(zapcore.DebugLevel)
+ }
+ return true
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go
new file mode 100644
index 0000000..fcd3903
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_journal.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package logutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "go.etcd.io/etcd/pkg/systemd"
+
+ "github.com/coreos/go-systemd/journal"
+ "go.uber.org/zap/zapcore"
+)
+
+// NewJournalWriter wraps "io.Writer" to redirect log output
+// to the local systemd journal. If the journald send fails, it falls
+// back to writing to the original writer.
+// The decode overhead is only <30µs per write.
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
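+//
+// Illustrative usage (an editorial sketch, not part of the upstream file):
+//
+// w, err := NewJournalWriter(os.Stderr)
+// if err != nil {
+//  // journald is not reachable; keep writing to os.Stderr directly
+// }
+//
+// On success, w can be handed to the zap logging setup as its write target.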
+func NewJournalWriter(wr io.Writer) (io.Writer, error) {
+ return &journalWriter{Writer: wr}, systemd.DialJournal()
+}
+
+type journalWriter struct {
+ io.Writer
+}
+
+// WARN: assume that etcd uses default field names in zap encoder config
+// make sure to keep this up-to-date!
+type logLine struct {
+ Level string `json:"level"`
+ Caller string `json:"caller"`
+}
+
+func (w *journalWriter) Write(p []byte) (int, error) {
+ line := &logLine{}
+ if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
+ return 0, err
+ }
+
+ var pri journal.Priority
+ switch line.Level {
+ case zapcore.DebugLevel.String():
+ pri = journal.PriDebug
+ case zapcore.InfoLevel.String():
+ pri = journal.PriInfo
+
+ case zapcore.WarnLevel.String():
+ pri = journal.PriWarning
+ case zapcore.ErrorLevel.String():
+ pri = journal.PriErr
+
+ case zapcore.DPanicLevel.String():
+ pri = journal.PriCrit
+ case zapcore.PanicLevel.String():
+ pri = journal.PriCrit
+ case zapcore.FatalLevel.String():
+ pri = journal.PriCrit
+
+ default:
+ panic(fmt.Errorf("unknown log level: %q", line.Level))
+ }
+
+ err := journal.Send(string(p), pri, map[string]string{
+ "PACKAGE": filepath.Dir(line.Caller),
+ "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+ })
+ if err != nil {
+ // "journal" also falls back to stderr
+ // "fmt.Fprintln(os.Stderr, s)"
+ return w.Writer.Write(p)
+ }
+ return 0, nil
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go
new file mode 100644
index 0000000..e92cba0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/logutil/zap_raft.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "errors"
+
+ "go.etcd.io/etcd/raft"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// NewRaftLogger converts "*zap.Logger" to "raft.Logger".
+func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
+ if lcfg == nil {
+ return nil, errors.New("nil zap.Config")
+ }
+ lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+ if err != nil {
+ return nil, err
+ }
+ return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
+// and "zapcore.WriteSyncer".
+func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
+ // "AddCallerSkip" to annotate caller outside of "logutil"
+ lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+ return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+type zapRaftLogger struct {
+ lg *zap.Logger
+ sugar *zap.SugaredLogger
+}
+
+func (zl *zapRaftLogger) Debug(args ...interface{}) {
+ zl.sugar.Debug(args...)
+}
+
+func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
+ zl.sugar.Debugf(format, args...)
+}
+
+func (zl *zapRaftLogger) Error(args ...interface{}) {
+ zl.sugar.Error(args...)
+}
+
+func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
+ zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapRaftLogger) Info(args ...interface{}) {
+ zl.sugar.Info(args...)
+}
+
+func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
+ zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapRaftLogger) Warning(args ...interface{}) {
+ zl.sugar.Warn(args...)
+}
+
+func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
+ zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapRaftLogger) Fatal(args ...interface{}) {
+ zl.sugar.Fatal(args...)
+}
+
+func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
+ zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapRaftLogger) Panic(args ...interface{}) {
+ zl.sugar.Panic(args...)
+}
+
+func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
+ zl.sugar.Panicf(format, args...)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/systemd/doc.go b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go
new file mode 100644
index 0000000..30e77ce
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/systemd/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package systemd provides utility functions for systemd.
+package systemd
diff --git a/vendor/go.etcd.io/etcd/pkg/systemd/journal.go b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go
new file mode 100644
index 0000000..b861c69
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/systemd/journal.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package systemd
+
+import "net"
+
+// DialJournal returns no error if the process can dial the journal socket.
+// It returns an error if the dial fails, which indicates that journald is not
+// available (e.g. when running embedded etcd inside a Docker container).
+// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
+func DialJournal() error {
+ conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
+ if conn != nil {
+ defer conn.Close()
+ }
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/pkg/types/doc.go
new file mode 100644
index 0000000..de8ef0b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types declares various data types and implements type-checking
+// functions.
+package types
diff --git a/vendor/go.etcd.io/etcd/pkg/types/id.go b/vendor/go.etcd.io/etcd/pkg/types/id.go
new file mode 100644
index 0000000..ae00388
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/types/id.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import "strconv"
+
+// ID represents a generic identifier which is canonically
+// stored as a uint64 but is typically represented as a
+// base-16 string for input/output
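+// (worked example, added editorially: ID(255).String() == "ff" and
+// IDFromString("ff") returns ID(255)).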
+type ID uint64
+
+func (i ID) String() string {
+ return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString attempts to create an ID from a base-16 string.
+func IDFromString(s string) (ID, error) {
+ i, err := strconv.ParseUint(s, 16, 64)
+ return ID(i), err
+}
+
+// IDSlice implements the sort interface
+type IDSlice []ID
+
+func (p IDSlice) Len() int { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/go.etcd.io/etcd/pkg/types/set.go b/vendor/go.etcd.io/etcd/pkg/types/set.go
new file mode 100644
index 0000000..c111b0c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/types/set.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+type Set interface {
+ Add(string)
+ Remove(string)
+ Contains(string) bool
+ Equals(Set) bool
+ Length() int
+ Values() []string
+ Copy() Set
+ Sub(Set) Set
+}
+
+func NewUnsafeSet(values ...string) *unsafeSet {
+ set := &unsafeSet{make(map[string]struct{})}
+ for _, v := range values {
+ set.Add(v)
+ }
+ return set
+}
+
+func NewThreadsafeSet(values ...string) *tsafeSet {
+ us := NewUnsafeSet(values...)
+ return &tsafeSet{us, sync.RWMutex{}}
+}
+
+type unsafeSet struct {
+ d map[string]struct{}
+}
+
+// Add adds a new value to the set (no-op if the value is already present)
+func (us *unsafeSet) Add(value string) {
+ us.d[value] = struct{}{}
+}
+
+// Remove removes the given value from the set
+func (us *unsafeSet) Remove(value string) {
+ delete(us.d, value)
+}
+
+// Contains returns whether the set contains the given value
+func (us *unsafeSet) Contains(value string) (exists bool) {
+ _, exists = us.d[value]
+ return exists
+}
+
+// ContainsAll returns whether the set contains all given values
+func (us *unsafeSet) ContainsAll(values []string) bool {
+ for _, s := range values {
+ if !us.Contains(s) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equals returns whether the contents of two sets are identical
+func (us *unsafeSet) Equals(other Set) bool {
+ v1 := sort.StringSlice(us.Values())
+ v2 := sort.StringSlice(other.Values())
+ v1.Sort()
+ v2.Sort()
+ return reflect.DeepEqual(v1, v2)
+}
+
+// Length returns the number of elements in the set
+func (us *unsafeSet) Length() int {
+ return len(us.d)
+}
+
+// Values returns the values of the Set in an unspecified order.
+func (us *unsafeSet) Values() (values []string) {
+ values = make([]string, 0)
+ for val := range us.d {
+ values = append(values, val)
+ }
+ return values
+}
+
+// Copy creates a new Set containing the values of the first
+func (us *unsafeSet) Copy() Set {
+ cp := NewUnsafeSet()
+ for val := range us.d {
+ cp.Add(val)
+ }
+
+ return cp
+}
+
+// Sub removes all elements in other from the set
+func (us *unsafeSet) Sub(other Set) Set {
+ oValues := other.Values()
+ result := us.Copy().(*unsafeSet)
+
+ for _, val := range oValues {
+ if _, ok := result.d[val]; !ok {
+ continue
+ }
+ delete(result.d, val)
+ }
+
+ return result
+}
+
+type tsafeSet struct {
+ us *unsafeSet
+ m sync.RWMutex
+}
+
+func (ts *tsafeSet) Add(value string) {
+ ts.m.Lock()
+ defer ts.m.Unlock()
+ ts.us.Add(value)
+}
+
+func (ts *tsafeSet) Remove(value string) {
+ ts.m.Lock()
+ defer ts.m.Unlock()
+ ts.us.Remove(value)
+}
+
+func (ts *tsafeSet) Contains(value string) (exists bool) {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Contains(value)
+}
+
+func (ts *tsafeSet) Equals(other Set) bool {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Equals(other)
+}
+
+func (ts *tsafeSet) Length() int {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Length()
+}
+
+func (ts *tsafeSet) Values() (values []string) {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Values()
+}
+
+func (ts *tsafeSet) Copy() Set {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ usResult := ts.us.Copy().(*unsafeSet)
+ return &tsafeSet{usResult, sync.RWMutex{}}
+}
+
+func (ts *tsafeSet) Sub(other Set) Set {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ usResult := ts.us.Sub(other).(*unsafeSet)
+ return &tsafeSet{usResult, sync.RWMutex{}}
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/pkg/types/slice.go
new file mode 100644
index 0000000..0dd9ca7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/types/slice.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// Uint64Slice implements sort interface
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/go.etcd.io/etcd/pkg/types/urls.go b/vendor/go.etcd.io/etcd/pkg/types/urls.go
new file mode 100644
index 0000000..9e5d03f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/types/urls.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "sort"
+ "strings"
+)
+
+type URLs []url.URL
+
+func NewURLs(strs []string) (URLs, error) {
+ all := make([]url.URL, len(strs))
+ if len(all) == 0 {
+ return nil, errors.New("no valid URLs given")
+ }
+ for i, in := range strs {
+ in = strings.TrimSpace(in)
+ u, err := url.Parse(in)
+ if err != nil {
+ return nil, err
+ }
+ if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
+ return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+ }
+ if _, _, err := net.SplitHostPort(u.Host); err != nil {
+ return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+ }
+ if u.Path != "" {
+ return nil, fmt.Errorf("URL must not contain a path: %s", in)
+ }
+ all[i] = *u
+ }
+ us := URLs(all)
+ us.Sort()
+
+ return us, nil
+}
+
+func MustNewURLs(strs []string) URLs {
+ urls, err := NewURLs(strs)
+ if err != nil {
+ panic(err)
+ }
+ return urls
+}
+
+func (us URLs) String() string {
+ return strings.Join(us.StringSlice(), ",")
+}
+
+func (us *URLs) Sort() {
+ sort.Sort(us)
+}
+func (us URLs) Len() int { return len(us) }
+func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
+func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
+
+func (us URLs) StringSlice() []string {
+ out := make([]string, len(us))
+ for i := range us {
+ out[i] = us[i].String()
+ }
+
+ return out
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go
new file mode 100644
index 0000000..47690cc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/types/urlsmap.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// URLsMap is a map from a name to its URLs.
+type URLsMap map[string]URLs
+
+// NewURLsMap returns a URLsMap instantiated from the given string,
+// which consists of discovery-formatted names-to-URLs, like:
+// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
+func NewURLsMap(s string) (URLsMap, error) {
+ m := parse(s)
+
+ cl := URLsMap{}
+ for name, urls := range m {
+ us, err := NewURLs(urls)
+ if err != nil {
+ return nil, err
+ }
+ cl[name] = us
+ }
+ return cl, nil
+}
+
+// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
+// string values in the map can be multiple values separated by the sep string.
+func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
+ var err error
+ um := URLsMap{}
+ for k, v := range m {
+ um[k], err = NewURLs(strings.Split(v, sep))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return um, nil
+}
+
+// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
+func (c URLsMap) String() string {
+ var pairs []string
+ for name, urls := range c {
+ for _, url := range urls {
+ pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
+ }
+ }
+ sort.Strings(pairs)
+ return strings.Join(pairs, ",")
+}
+
+// URLs returns a list of all URLs.
+// The returned list is sorted in ascending lexicographical order.
+func (c URLsMap) URLs() []string {
+ var urls []string
+ for _, us := range c {
+ for _, u := range us {
+ urls = append(urls, u.String())
+ }
+ }
+ sort.Strings(urls)
+ return urls
+}
+
+// Len returns the size of URLsMap.
+func (c URLsMap) Len() int {
+ return len(c)
+}
+
+// parse parses the given string and returns a map listing the values specified for each key.
+func parse(s string) map[string][]string {
+ m := make(map[string][]string)
+ for s != "" {
+ key := s
+ if i := strings.IndexAny(key, ","); i >= 0 {
+ key, s = key[:i], key[i+1:]
+ } else {
+ s = ""
+ }
+ if key == "" {
+ continue
+ }
+ value := ""
+ if i := strings.Index(key, "="); i >= 0 {
+ key, value = key[:i], key[i+1:]
+ }
+ m[key] = append(m[key], value)
+ }
+ return m
+}
diff --git a/vendor/go.etcd.io/etcd/raft/OWNERS b/vendor/go.etcd.io/etcd/raft/OWNERS
new file mode 100644
index 0000000..ab78106
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/OWNERS
@@ -0,0 +1,19 @@
+approvers:
+- heyitsanthony
+- philips
+- fanminshi
+- gyuho
+- mitake
+- jpbetz
+- xiang90
+- bdarnell
+reviewers:
+- heyitsanthony
+- philips
+- fanminshi
+- gyuho
+- mitake
+- jpbetz
+- xiang90
+- bdarnell
+- tschottdorf
diff --git a/vendor/go.etcd.io/etcd/raft/README.md b/vendor/go.etcd.io/etcd/raft/README.md
new file mode 100644
index 0000000..a78e5f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/README.md
@@ -0,0 +1,197 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
+
+Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/master/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Its features include:
+
+- Leader election
+- Log replication
+- Log compaction
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader for a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader for a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group (a read-index sketch follows these lists)
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce disk synchronized I/O
+- Writing to leader's disk in parallel
+- Internal proposal redirection from followers to leader
+- Automatic stepping down when the leader loses quorum
+- Protection against unbounded log growth when quorum is lost
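+
+As a rough sketch of how the linearizable read-only queries above are driven from the application side (the `rctx` value and the `appliedIndex` bookkeeping are illustrative, not part of this library; `rd` is the Ready consumed in the Usage loop further below):
+```go
+    // Ask raft for a safe read index, tagging the request with an opaque context.
+    rctx := []byte("read-request-42")
+    if err := n.ReadIndex(ctx, rctx); err != nil {
+        // handle the error (for example, the node has been stopped)
+    }
+
+    // Later, in the Ready loop, a ReadState carrying the same rctx comes back.
+    // Once the applied index reaches rs.Index, the read can be served locally.
+    for _, rs := range rd.ReadStates {
+        if bytes.Equal(rs.RequestCtx, rctx) && appliedIndex >= rs.Index {
+            // serve the read-only query from the local state machine
+        }
+    }
+```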
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+ storage := raft.NewMemoryStorage()
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+ // Set peer list to the other nodes in the cluster.
+ // Note that they need to be started separately as well.
+ n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single-node cluster, like so:
+```go
+ // Create storage and config as shown above.
+ // Set peer list to itself, so this node can become the leader of this single-node cluster.
+ peers := []raft.Peer{{ID: 0x01}}
+ n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
+```go
+ // Create storage and config as shown above.
+ n := raft.StartNode(c, nil)
+```
+
+To restart a node from previous state:
+```go
+ storage := raft.NewMemoryStorage()
+
+ // Recover the in-memory storage from persistent snapshot, state and entries.
+ storage.ApplySnapshot(snapshot)
+ storage.SetHardState(state)
+ storage.Append(entries)
+
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+
+ // Restart raft without peer information.
+ // Peer information is already included in the storage.
+ n := raft.RestartNode(c)
+```
+
+After creating a Node, the user has a few responsibilities:
+
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
+
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
+
+Third, after receiving a message from another node, pass it to Node.Step:
+
+```go
+ func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+ n.Step(ctx, m)
+ }
+```
+
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+ for {
+ select {
+ case <-s.Ticker:
+ n.Tick()
+ case rd := <-s.Node.Ready():
+ saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+ send(rd.Messages)
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ processSnapshot(rd.Snapshot)
+ }
+ for _, entry := range rd.CommittedEntries {
+ process(entry)
+ if entry.Type == raftpb.EntryConfChange {
+ var cc raftpb.ConfChange
+ cc.Unmarshal(entry.Data)
+ s.Node.ApplyConfChange(cc)
+ }
+ }
+ s.Node.Advance()
+ case <-s.done:
+ return
+ }
+ }
+```
+
+To propose changes to the state machine, serialize the application data into a byte slice and call:
+
+```go
+ n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout.
+
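+One hedged way to handle re-proposal (the `waitCommitted` helper and the timeout are illustrative, not part of this library):
+```go
+    // Propose, then wait to observe the data in CommittedEntries; re-propose on timeout.
+    for {
+        cctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+        err := n.Propose(cctx, data)
+        if err == nil {
+            err = waitCommitted(cctx, data) // hypothetical: blocks until data is committed or cctx expires
+        }
+        cancel()
+        if err == nil {
+            break // the data was committed
+        }
+        // timed out or dropped; loop and re-propose
+    }
+```
+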
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+ n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through:
+
+```go
+ var cc raftpb.ConfChange
+ cc.Unmarshal(data)
+ n.ApplyConfChange(cc)
+```
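+
+For completeness, a sketch of how such a ConfChange might be built and proposed in the first place (the node ID and context value are illustrative):
+```go
+    cc := raftpb.ConfChange{
+        Type:    raftpb.ConfChangeAddNode,
+        NodeID:  0x04,                          // ID of the node being added
+        Context: []byte("http://4.4.4.4:2380"), // opaque, application-defined data
+    }
+    n.ProposeConfChange(ctx, cc)
+```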
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/go.etcd.io/etcd/raft/design.md b/vendor/go.etcd.io/etcd/raft/design.md
new file mode 100644
index 0000000..7bc0531
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of all followers, and sends `replication message`s to each follower based on its progress.
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` to its latest one in the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, `snapshot`.
+
+```
+ +--------------------------------------------------------+
+ | send snapshot |
+ | |
+ +---------+----------+ +----------v---------+
+ +---> probe | | snapshot |
+ | | max inflight = 1 <----------------------------------+ max inflight = 0 |
+ | +---------+----------+ +--------------------+
+ | | 1. snapshot success
+ | | (next=snapshot.index + 1)
+ | | 2. snapshot failure
+ | | (no change)
+ | | 3. receives msgAppResp(rej=false&&index>lastsnap.index)
+ | | (match=m.index,next=match+1)
+receives msgAppResp(rej=true)
+(next=match+1)| |
+ | |
+ | |
+ | | receives msgAppResp(rej=false&&index>match)
+ | | (match=m.index,next=match+1)
+ | |
+ | |
+ | |
+ | +---------v----------+
+ | | replicate |
+ +---+ max inflight = n |
+ +--------------------+
+```
+
+When the progress of a follower is in the `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in the `replicate` state, the leader sends `replication message`s, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replication of log entries to the follower.
+
+When the progress of a follower is in the `snapshot` state, the leader stops sending any `replication message`s.
+
+A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower fast. The progress will fall back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see the open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the success, failure or abortion of the previous snapshot sent. The progress will go back to `probe` after the sending result is applied.
+
+### Flow Control
+
+1. Limit the max size of each message sent. The max should be configurable.
+This lowers the cost in the probing state, since we limit the size per message, and lowers the penalty when `next` has been aggressively decreased to a value that is too low.
+
+2. Limit the number of in-flight messages to fewer than N when in the `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger a burst of unnecessary resends. Both limits map onto the library configuration, as sketched below.
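+
+Both limits surface in the library's `Config` (a minimal sketch; the values are illustrative):
+
+```go
+    c := &raft.Config{
+        ID:              0x01,
+        ElectionTick:    10,
+        HeartbeatTick:   1,
+        Storage:         raft.NewMemoryStorage(),
+        MaxSizePerMsg:   1 * 1024 * 1024, // cap the byte size of entries per replication message
+        MaxInflightMsgs: 256,             // cap in-flight replication messages per follower in replicate state
+    }
+```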
diff --git a/vendor/go.etcd.io/etcd/raft/doc.go b/vendor/go.etcd.io/etcd/raft/doc.go
new file mode 100644
index 0000000..c30d884
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/doc.go
@@ -0,0 +1,300 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/etcd-io/etcd/tree/master/contrib/raftexample
+
+Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+ storage := raft.NewMemoryStorage()
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+ n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+ storage := raft.NewMemoryStorage()
+
+ // recover the in-memory storage from persistent
+ // snapshot, state and entries.
+ storage.ApplySnapshot(snapshot)
+ storage.SetHardState(state)
+ storage.Append(entries)
+
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+
+ // restart raft without peer information.
+ // peer information is already included in the storage.
+ n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialize the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+ func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+ n.Step(ctx, m)
+ }
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+ for {
+ select {
+ case <-s.Ticker:
+ n.Tick()
+ case rd := <-s.Node.Ready():
+      saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+ send(rd.Messages)
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ processSnapshot(rd.Snapshot)
+ }
+ for _, entry := range rd.CommittedEntries {
+ process(entry)
+ if entry.Type == raftpb.EntryConfChange {
+ var cc raftpb.ConfChange
+ cc.Unmarshal(entry.Data)
+ s.Node.ApplyConfChange(cc)
+ }
+ }
+ s.Node.Advance()
+ case <-s.done:
+ return
+ }
+ }
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice, and call:
+
+ n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+ n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, a committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+ var cc raftpb.ConfChange
+ cc.Unmarshal(data)
+ n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives messages in the Protocol Buffer format (defined
+in the raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+ 'MsgHup' is used for election. If a node is a follower or candidate, the
+ 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+ candidate has not received any heartbeat before the election timeout, it
+ passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+ start a new election.
+
+ 'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+ the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+ the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+ send periodic 'MsgHeartbeat' messages to its followers.
+
+ 'MsgProp' proposes to append data to its log entries. This is a special
+ type to redirect proposals to leader. Therefore, send method overwrites
+ raftpb.Message's term with its HardState's term to avoid attaching its
+ local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+ method, the leader first calls the 'appendEntry' method to append entries
+ to its log, and then calls 'bcastAppend' method to send those entries to
+ its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+ follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
+ method. It is stored with sender's ID and later forwarded to leader by
+ rafthttp package.
+
+ 'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+ which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+ type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+ back to follower, because it indicates that there is a valid leader sending
+ 'MsgApp' messages. Candidate and follower respond to this message in
+ 'MsgAppResp' type.
+
+	'MsgAppResp' is the response to a log replication request ('MsgApp'). When
+	'MsgApp' is passed to a candidate or follower's Step method, it responds by
+	calling the 'handleAppendEntries' method, which sends 'MsgAppResp' to the
+	raft mailbox.
+
+ 'MsgVote' requests votes for election. When a node is a follower or
+ candidate and 'MsgHup' is passed to its Step method, then the node calls
+ 'campaign' method to campaign itself to become a leader. Once 'campaign'
+ method is called, the node becomes candidate and sends 'MsgVote' to peers
+ in cluster to request votes. When passed to leader or candidate's Step
+ method and the message's Term is lower than leader's or candidate's,
+ 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+ If leader or candidate receives 'MsgVote' with higher term, it will revert
+	back to follower. When 'MsgVote' is passed to a follower, it votes for the
+	sender only when the sender's last log term is greater than the follower's,
+	or the sender's last log term is equal to the follower's but the sender's
+	last log index is greater than or equal to the follower's.
+
+	'MsgVoteResp' contains responses to a vote request. When 'MsgVoteResp' is
+	passed to a candidate, the candidate calculates how many votes it has won.
+	If it is more than a majority (quorum), it becomes leader and calls
+	'bcastAppend'. If the candidate receives a majority of denial votes, it
+	reverts back to follower.
+
+ 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+ protocol. When Config.PreVote is true, a pre-election is carried out first
+ (using the same rules as a regular election), and no node increases its term
+ number unless the pre-election indicates that the campaigning node would win.
+ This minimizes disruption when a partitioned node rejoins the cluster.
+
+	'MsgSnap' requests the installation of a snapshot. When a node has just
+	become a leader, or the leader receives a 'MsgProp' message, it calls the
+	'bcastAppend' method, which then calls the 'sendAppend' method for each
+	follower. In 'sendAppend', if the leader fails to get the term or entries,
+	it requests a snapshot by sending a 'MsgSnap' message.
+
+	'MsgSnapStatus' tells the result of a snapshot install message. When a
+	follower rejects 'MsgSnap', it indicates that the snapshot request made
+	with 'MsgSnap' failed due to network issues that prevented the network
+	layer from delivering the snapshot to the follower. The leader then
+	considers the follower's progress as probe. When 'MsgSnap' is not
+	rejected, it indicates that the snapshot succeeded, and the leader sets
+	the follower's progress to probe and resumes its log replication.
+
+	'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat' is
+	passed to a candidate and the message's term is higher than the candidate's,
+	the candidate reverts back to follower, updates its committed index from
+	the one in this heartbeat, and sends the message to its mailbox. When
+	'MsgHeartbeat' is passed to a follower's Step method and the message's term
+	is higher than the follower's, the follower updates its leaderID with the
+	ID from the message.
+
+	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+	is passed to the leader's Step method, the leader knows which follower
+	responded. Only when the leader's last committed index is greater than the
+	follower's Match index does the leader run the 'sendAppend' method.
+
+	'MsgUnreachable' indicates that a request (message) wasn't delivered. When
+	'MsgUnreachable' is passed to the leader's Step method, the leader learns
+	that the follower that sent this 'MsgUnreachable' is not reachable, which
+	often indicates that a 'MsgApp' was lost. When the follower's progress
+	state is replicate, the leader sets it back to probe.
+
+*/
+package raft
diff --git a/vendor/go.etcd.io/etcd/raft/log.go b/vendor/go.etcd.io/etcd/raft/log.go
new file mode 100644
index 0000000..03f83e6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/log.go
@@ -0,0 +1,370 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+ "log"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+type raftLog struct {
+ // storage contains all stable entries since the last snapshot.
+ storage Storage
+
+ // unstable contains all unstable entries and snapshot.
+ // they will be saved into storage.
+ unstable unstable
+
+ // committed is the highest log position that is known to be in
+ // stable storage on a quorum of nodes.
+ committed uint64
+ // applied is the highest log position that the application has
+ // been instructed to apply to its state machine.
+ // Invariant: applied <= committed
+ applied uint64
+
+ logger Logger
+
+	// maxNextEntsSize is the maximum aggregate byte size of the messages
+ // returned from calls to nextEnts.
+ maxNextEntsSize uint64
+}
+
+// newLog returns a log using the given storage and default options. It
+// recovers the log to the state where it has just committed and applied the
+// latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+ return newLogWithSize(storage, logger, noLimit)
+}
+
+// newLogWithSize returns a log using the given storage and max
+// message size.
+func newLogWithSize(storage Storage, logger Logger, maxNextEntsSize uint64) *raftLog {
+ if storage == nil {
+ log.Panic("storage must not be nil")
+ }
+ log := &raftLog{
+ storage: storage,
+ logger: logger,
+ maxNextEntsSize: maxNextEntsSize,
+ }
+ firstIndex, err := storage.FirstIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ lastIndex, err := storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ log.unstable.offset = lastIndex + 1
+ log.unstable.logger = logger
+ // Initialize our committed and applied pointers to the time of the last compaction.
+ log.committed = firstIndex - 1
+ log.applied = firstIndex - 1
+
+ return log
+}
+
+func (l *raftLog) String() string {
+ return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
+ if l.matchTerm(index, logTerm) {
+ lastnewi = index + uint64(len(ents))
+ ci := l.findConflict(ents)
+ switch {
+ case ci == 0:
+ case ci <= l.committed:
+ l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+ default:
+ offset := index + 1
+ l.append(ents[ci-offset:]...)
+ }
+ l.commitTo(min(committed, lastnewi))
+ return lastnewi, true
+ }
+ return 0, false
+}
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+ if len(ents) == 0 {
+ return l.lastIndex()
+ }
+ if after := ents[0].Index - 1; after < l.committed {
+ l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+ }
+ l.unstable.truncateAndAppend(ents)
+ return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the first pair of conflicting entries between the existing
+// entries and the given entries, if there are any.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The first entry MUST have an index equal to the argument 'from'.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+ for _, ne := range ents {
+ if !l.matchTerm(ne.Index, ne.Term) {
+ if ne.Index <= l.lastIndex() {
+ l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+ ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
+ }
+ return ne.Index
+ }
+ }
+ return 0
+}
+
+func (l *raftLog) unstableEntries() []pb.Entry {
+ if len(l.unstable.entries) == 0 {
+ return nil
+ }
+ return l.unstable.entries
+}
+
+// nextEnts returns all the available entries for execution.
+// If applied is smaller than the index of snapshot, it returns all committed
+// entries after the index of snapshot.
+func (l *raftLog) nextEnts() (ents []pb.Entry) {
+ off := max(l.applied+1, l.firstIndex())
+ if l.committed+1 > off {
+ ents, err := l.slice(off, l.committed+1, l.maxNextEntsSize)
+ if err != nil {
+ l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+ }
+ return ents
+ }
+ return nil
+}
+
+// hasNextEnts returns whether there are any available entries for execution. This
+// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+ off := max(l.applied+1, l.firstIndex())
+ return l.committed+1 > off
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+ if l.unstable.snapshot != nil {
+ return *l.unstable.snapshot, nil
+ }
+ return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+ if i, ok := l.unstable.maybeFirstIndex(); ok {
+ return i
+ }
+ index, err := l.storage.FirstIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+ if i, ok := l.unstable.maybeLastIndex(); ok {
+ return i
+ }
+ i, err := l.storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+ // never decrease commit
+ if l.committed < tocommit {
+ if l.lastIndex() < tocommit {
+ l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+ }
+ l.committed = tocommit
+ }
+}
+
+func (l *raftLog) appliedTo(i uint64) {
+ if i == 0 {
+ return
+ }
+ if l.committed < i || i < l.applied {
+ l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+ }
+ l.applied = i
+}
+
+func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+func (l *raftLog) lastTerm() uint64 {
+ t, err := l.term(l.lastIndex())
+ if err != nil {
+ l.logger.Panicf("unexpected error when getting the last term (%v)", err)
+ }
+ return t
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+ // the valid term range is [index of dummy entry, last index]
+ dummyIndex := l.firstIndex() - 1
+ if i < dummyIndex || i > l.lastIndex() {
+ // TODO: return an error instead?
+ return 0, nil
+ }
+
+ if t, ok := l.unstable.maybeTerm(i); ok {
+ return t, nil
+ }
+
+ t, err := l.storage.Term(i)
+ if err == nil {
+ return t, nil
+ }
+ if err == ErrCompacted || err == ErrUnavailable {
+ return 0, err
+ }
+ panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
+ if i > l.lastIndex() {
+ return nil, nil
+ }
+ return l.slice(i, l.lastIndex()+1, maxsize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+ ents, err := l.entries(l.firstIndex(), noLimit)
+ if err == nil {
+ return ents
+ }
+ if err == ErrCompacted { // try again if there was a racing compaction
+ return l.allEntries()
+ }
+ // TODO (xiangli): handle error?
+ panic(err)
+}
+
+// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(lasti, term uint64) bool {
+ return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
+}
+
+func (l *raftLog) matchTerm(i, term uint64) bool {
+ t, err := l.term(i)
+ if err != nil {
+ return false
+ }
+ return t == term
+}
+
+func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
+ if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
+ l.commitTo(maxIndex)
+ return true
+ }
+ return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+ l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+ l.committed = s.Metadata.Index
+ l.unstable.restore(s)
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+ err := l.mustCheckOutOfBounds(lo, hi)
+ if err != nil {
+ return nil, err
+ }
+ if lo == hi {
+ return nil, nil
+ }
+ var ents []pb.Entry
+ if lo < l.unstable.offset {
+ storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
+ if err == ErrCompacted {
+ return nil, err
+ } else if err == ErrUnavailable {
+ l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
+ } else if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+
+ // check if ents has reached the size limitation
+ if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
+ return storedEnts, nil
+ }
+
+ ents = storedEnts
+ }
+ if hi > l.unstable.offset {
+ unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
+ if len(ents) > 0 {
+ ents = append([]pb.Entry{}, ents...)
+ ents = append(ents, unstable...)
+ } else {
+ ents = unstable
+ }
+ }
+ return limitSize(ents, maxSize), nil
+}
+
+// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries)
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+ if lo > hi {
+ l.logger.Panicf("invalid slice %d > %d", lo, hi)
+ }
+ fi := l.firstIndex()
+ if lo < fi {
+ return ErrCompacted
+ }
+
+ length := l.lastIndex() + 1 - fi
+ if lo < fi || hi > fi+length {
+ l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+ }
+ return nil
+}
+
+func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
+ if err == nil {
+ return t
+ }
+ if err == ErrCompacted {
+ return 0
+ }
+ l.logger.Panicf("unexpected error (%v)", err)
+ return 0
+}
diff --git a/vendor/go.etcd.io/etcd/raft/log_unstable.go b/vendor/go.etcd.io/etcd/raft/log_unstable.go
new file mode 100644
index 0000000..1005bf6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/log_unstable.go
@@ -0,0 +1,159 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "go.etcd.io/etcd/raft/raftpb"
+
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+ // the incoming unstable snapshot, if any.
+ snapshot *pb.Snapshot
+ // all entries that have not yet been written to storage.
+ entries []pb.Entry
+ offset uint64
+
+ logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries
+// if it has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+ if u.snapshot != nil {
+ return u.snapshot.Metadata.Index + 1, true
+ }
+ return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+ if l := len(u.entries); l != 0 {
+ return u.offset + uint64(l) - 1, true
+ }
+ if u.snapshot != nil {
+ return u.snapshot.Metadata.Index, true
+ }
+ return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+ if i < u.offset {
+ if u.snapshot == nil {
+ return 0, false
+ }
+ if u.snapshot.Metadata.Index == i {
+ return u.snapshot.Metadata.Term, true
+ }
+ return 0, false
+ }
+
+ last, ok := u.maybeLastIndex()
+ if !ok {
+ return 0, false
+ }
+ if i > last {
+ return 0, false
+ }
+ return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+ gt, ok := u.maybeTerm(i)
+ if !ok {
+ return
+ }
+ // if i < offset, term is matched with the snapshot
+ // only update the unstable entries if term is matched with
+ // an unstable entry.
+ if gt == t && i >= u.offset {
+ u.entries = u.entries[i+1-u.offset:]
+ u.offset = i + 1
+ u.shrinkEntriesArray()
+ }
+}
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+ // We replace the array if we're using less than half of the space in
+ // it. This number is fairly arbitrary, chosen as an attempt to balance
+ // memory usage vs number of allocations. It could probably be improved
+ // with some focused tuning.
+ const lenMultiple = 2
+ if len(u.entries) == 0 {
+ u.entries = nil
+ } else if len(u.entries)*lenMultiple < cap(u.entries) {
+ newEntries := make([]pb.Entry, len(u.entries))
+ copy(newEntries, u.entries)
+ u.entries = newEntries
+ }
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+ if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+ u.snapshot = nil
+ }
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+ u.offset = s.Metadata.Index + 1
+ u.entries = nil
+ u.snapshot = &s
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+ after := ents[0].Index
+ switch {
+ case after == u.offset+uint64(len(u.entries)):
+ // after is the next index in the u.entries
+ // directly append
+ u.entries = append(u.entries, ents...)
+ case after <= u.offset:
+ u.logger.Infof("replace the unstable entries from index %d", after)
+ // The log is being truncated to before our current offset
+ // portion, so set the offset and replace the entries
+ u.offset = after
+ u.entries = ents
+ default:
+ // truncate to after and copy to u.entries
+ // then append
+ u.logger.Infof("truncate the unstable entries before index %d", after)
+ u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
+ u.entries = append(u.entries, ents...)
+ }
+}
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+ u.mustCheckOutOfBounds(lo, hi)
+ return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+ if lo > hi {
+ u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+ }
+ upper := u.offset + uint64(len(u.entries))
+ if lo < u.offset || hi > upper {
+ u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/raft/logger.go b/vendor/go.etcd.io/etcd/raft/logger.go
new file mode 100644
index 0000000..426a77d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+type Logger interface {
+ Debug(v ...interface{})
+ Debugf(format string, v ...interface{})
+
+ Error(v ...interface{})
+ Errorf(format string, v ...interface{})
+
+ Info(v ...interface{})
+ Infof(format string, v ...interface{})
+
+ Warning(v ...interface{})
+ Warningf(format string, v ...interface{})
+
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+}
+
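+// SetLogger replaces the package-level logger used by the raft package. The
+// call below is an illustrative example:
+//
+//	SetLogger(&DefaultLogger{Logger: log.New(os.Stdout, "raft ", log.LstdFlags)})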
+func SetLogger(l Logger) { raftLogger = l }
+
+var (
+ defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+ discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+ raftLogger = Logger(defaultLogger)
+)
+
+const (
+ calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+ *log.Logger
+ debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+ l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+ l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+ if l.debug {
+ l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+ }
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+ if l.debug {
+ l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+ }
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+ l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+ l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+ l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+ l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+ l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+ l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+ l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+ l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+ l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+ l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+ return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/go.etcd.io/etcd/raft/node.go b/vendor/go.etcd.io/etcd/raft/node.go
new file mode 100644
index 0000000..749db98
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/node.go
@@ -0,0 +1,611 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "context"
+ "errors"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+ SnapshotFinish SnapshotStatus = 1
+ SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+ emptyState = pb.HardState{}
+
+ // ErrStopped is returned by methods on Nodes that have been stopped.
+ ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+ Lead uint64 // must use atomic operations to access; keep 64-bit aligned.
+ RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+ return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+ // The current volatile state of a Node.
+ // SoftState will be nil if there is no update.
+ // It is not required to consume or store SoftState.
+ *SoftState
+
+ // The current state of a Node to be saved to stable storage BEFORE
+ // Messages are sent.
+ // HardState will be equal to empty state if there is no update.
+ pb.HardState
+
+	// ReadStates can be used by the node to serve linearizable read requests locally
+	// when its applied index is greater than the index in the ReadState.
+	// Note that the ReadState will be returned when raft receives a msgReadIndex.
+	// The returned state is only valid for the request that asked to read.
+ ReadStates []ReadState
+
+ // Entries specifies entries to be saved to stable storage BEFORE
+ // Messages are sent.
+ Entries []pb.Entry
+
+ // Snapshot specifies the snapshot to be saved to stable storage.
+ Snapshot pb.Snapshot
+
+ // CommittedEntries specifies entries to be committed to a
+ // store/state-machine. These have previously been committed to stable
+ // store.
+ CommittedEntries []pb.Entry
+
+ // Messages specifies outbound messages to be sent AFTER Entries are
+ // committed to stable storage.
+ // If it contains a MsgSnap message, the application MUST report back to raft
+ // when the snapshot has been received or has failed by calling ReportSnapshot.
+ Messages []pb.Message
+
+ // MustSync indicates whether the HardState and Entries must be synchronously
+ // written to disk or if an asynchronous write is permissible.
+ MustSync bool
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+ return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+ return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+ return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+ return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+ !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+ len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
+}
+
+// appliedCursor extracts from the Ready the highest index the client has
+// applied (once the Ready is confirmed via Advance). If no information is
+// contained in the Ready, returns zero.
+func (rd Ready) appliedCursor() uint64 {
+ if n := len(rd.CommittedEntries); n > 0 {
+ return rd.CommittedEntries[n-1].Index
+ }
+ if index := rd.Snapshot.Metadata.Index; index > 0 {
+ return index
+ }
+ return 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+ // Tick increments the internal logical clock for the Node by a single tick. Election
+ // timeouts and heartbeat timeouts are in units of ticks.
+ Tick()
+ // Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+ Campaign(ctx context.Context) error
+ // Propose proposes that data be appended to the log. Note that proposals can be lost without
+	// notice; therefore it is the user's job to ensure proposal retries.
+ Propose(ctx context.Context, data []byte) error
+	// ProposeConfChange proposes a config change.
+	// At most one ConfChange can be in the process of going through consensus.
+	// The application needs to call ApplyConfChange when applying an EntryConfChange type entry.
+ ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
+ // Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+ Step(ctx context.Context, msg pb.Message) error
+
+ // Ready returns a channel that returns the current point-in-time state.
+ // Users of the Node must call Advance after retrieving the state returned by Ready.
+ //
+ // NOTE: No committed entries from the next Ready may be applied until all committed entries
+ // and snapshots from the previous one have finished.
+ Ready() <-chan Ready
+
+ // Advance notifies the Node that the application has saved progress up to the last Ready.
+ // It prepares the node to return the next available Ready.
+ //
+	// The application should generally call Advance after it applies the entries in the last Ready.
+ //
+ // However, as an optimization, the application may call Advance while it is applying the
+	// commands. For example, when the last Ready contains a snapshot, the application might take
+ // a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+ // progress, it can call Advance before finishing applying the last ready.
+ Advance()
+	// ApplyConfChange applies a config change to the local node.
+ // Returns an opaque ConfState protobuf which must be recorded
+ // in snapshots. Will never return nil; it returns a pointer only
+ // to match MemoryStorage.Compact.
+ ApplyConfChange(cc pb.ConfChange) *pb.ConfState
+
+ // TransferLeadership attempts to transfer leadership to the given transferee.
+ TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+	// ReadIndex requests a read state. The read state will be set in the Ready.
+ // Read state has a read index. Once the application advances further than the read
+ // index, any linearizable read requests issued before the read request can be
+ // processed safely. The read state will have the same rctx attached.
+ ReadIndex(ctx context.Context, rctx []byte) error
+
+ // Status returns the current status of the raft state machine.
+ Status() Status
+	// ReportUnreachable reports that the given node is not reachable for the last send.
+ ReportUnreachable(id uint64)
+ // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
+ // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
+	// Calling ReportSnapshot with SnapshotFinish is a no-op. But any failure in applying a
+	// snapshot (e.g., while streaming it from leader to follower) should be reported to the
+	// leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any raft
+	// log probes until the follower can apply the snapshot and advance its state. If the follower
+	// can't do that, e.g., due to a crash, it could end up in limbo, never getting any
+	// updates from the leader. Therefore, it is crucial that the application ensures that any
+	// failure in snapshot sending is caught and reported back to the leader, so it can resume raft
+	// log probing in the follower.
+ ReportSnapshot(id uint64, status SnapshotStatus)
+ // Stop performs any necessary termination of the Node.
+ Stop()
+}
+
+type Peer struct {
+ ID uint64
+ Context []byte
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+func StartNode(c *Config, peers []Peer) Node {
+ r := newRaft(c)
+ // become the follower at term 1 and apply initial configuration
+ // entries of term 1
+ r.becomeFollower(1, None)
+ for _, peer := range peers {
+ cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+ d, err := cc.Marshal()
+ if err != nil {
+ panic("unexpected marshal error")
+ }
+ e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
+ r.raftLog.append(e)
+ }
+ // Mark these initial entries as committed.
+ // TODO(bdarnell): These entries are still unstable; do we need to preserve
+ // the invariant that committed < unstable?
+ r.raftLog.committed = r.raftLog.lastIndex()
+ // Now apply them, mainly so that the application can call Campaign
+ // immediately after StartNode in tests. Note that these nodes will
+ // be added to raft twice: here and when the application's Ready
+ // loop calls ApplyConfChange. The calls to addNode must come after
+ // all calls to raftLog.append so progress.next is set after these
+ // bootstrapping entries (it is an error if we try to append these
+ // entries since they have already been committed).
+ // We do not set raftLog.applied so the application will be able
+ // to observe all conf changes via Ready.CommittedEntries.
+ for _, peer := range peers {
+ r.addNode(peer.ID)
+ }
+
+ n := newNode()
+ n.logger = c.Logger
+ go n.run(r)
+ return &n
+}
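+
+// Editor's note: a minimal, illustrative call to StartNode from the
+// application's side; it is not part of the upstream etcd sources.
+//
+//	storage := raft.NewMemoryStorage()
+//	c := &raft.Config{
+//		ID:              0x01,
+//		ElectionTick:    10,
+//		HeartbeatTick:   1,
+//		Storage:         storage,
+//		MaxSizePerMsg:   4096,
+//		MaxInflightMsgs: 256,
+//	}
+//	n := raft.StartNode(c, []raft.Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})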
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+ r := newRaft(c)
+
+ n := newNode()
+ n.logger = c.Logger
+ go n.run(r)
+ return &n
+}
+
+type msgWithResult struct {
+ m pb.Message
+ result chan error
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+ propc chan msgWithResult
+ recvc chan pb.Message
+ confc chan pb.ConfChange
+ confstatec chan pb.ConfState
+ readyc chan Ready
+ advancec chan struct{}
+ tickc chan struct{}
+ done chan struct{}
+ stop chan struct{}
+ status chan chan Status
+
+ logger Logger
+}
+
+func newNode() node {
+ return node{
+ propc: make(chan msgWithResult),
+ recvc: make(chan pb.Message),
+ confc: make(chan pb.ConfChange),
+ confstatec: make(chan pb.ConfState),
+ readyc: make(chan Ready),
+ advancec: make(chan struct{}),
+ // make tickc a buffered chan, so the raft node can buffer some ticks when the node
+ // is busy processing raft messages. The raft node will resume processing buffered
+ // ticks when it becomes idle.
+ tickc: make(chan struct{}, 128),
+ done: make(chan struct{}),
+ stop: make(chan struct{}),
+ status: make(chan chan Status),
+ }
+}
+
+func (n *node) Stop() {
+ select {
+ case n.stop <- struct{}{}:
+ // Not already stopped, so trigger it
+ case <-n.done:
+ // Node has already been stopped - no need to do anything
+ return
+ }
+ // Block until the stop has been acknowledged by run()
+ <-n.done
+}
+
+func (n *node) run(r *raft) {
+ var propc chan msgWithResult
+ var readyc chan Ready
+ var advancec chan struct{}
+ var prevLastUnstablei, prevLastUnstablet uint64
+ var havePrevLastUnstablei bool
+ var prevSnapi uint64
+ var applyingToI uint64
+ var rd Ready
+
+ lead := None
+ prevSoftSt := r.softState()
+ prevHardSt := emptyState
+
+ for {
+ if advancec != nil {
+ readyc = nil
+ } else {
+ rd = newReady(r, prevSoftSt, prevHardSt)
+ if rd.containsUpdates() {
+ readyc = n.readyc
+ } else {
+ readyc = nil
+ }
+ }
+
+ if lead != r.lead {
+ if r.hasLeader() {
+ if lead == None {
+ r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+ } else {
+ r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+ }
+ propc = n.propc
+ } else {
+ r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+ propc = nil
+ }
+ lead = r.lead
+ }
+
+ select {
+ // TODO: maybe buffer the config propose if there exists one (the way
+ // described in raft dissertation)
+ // Currently it is dropped in Step silently.
+ case pm := <-propc:
+ m := pm.m
+ m.From = r.id
+ err := r.Step(m)
+ if pm.result != nil {
+ pm.result <- err
+ close(pm.result)
+ }
+ case m := <-n.recvc:
+ // filter out response messages from an unknown From.
+ if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+ r.Step(m)
+ }
+ case cc := <-n.confc:
+ if cc.NodeID == None {
+ select {
+ case n.confstatec <- pb.ConfState{
+ Nodes: r.nodes(),
+ Learners: r.learnerNodes()}:
+ case <-n.done:
+ }
+ break
+ }
+ switch cc.Type {
+ case pb.ConfChangeAddNode:
+ r.addNode(cc.NodeID)
+ case pb.ConfChangeAddLearnerNode:
+ r.addLearner(cc.NodeID)
+ case pb.ConfChangeRemoveNode:
+ // block incoming proposal when local node is
+ // removed
+ if cc.NodeID == r.id {
+ propc = nil
+ }
+ r.removeNode(cc.NodeID)
+ case pb.ConfChangeUpdateNode:
+ default:
+ panic("unexpected conf type")
+ }
+ select {
+ case n.confstatec <- pb.ConfState{
+ Nodes: r.nodes(),
+ Learners: r.learnerNodes()}:
+ case <-n.done:
+ }
+ case <-n.tickc:
+ r.tick()
+ case readyc <- rd:
+ if rd.SoftState != nil {
+ prevSoftSt = rd.SoftState
+ }
+ if len(rd.Entries) > 0 {
+ prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
+ prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
+ havePrevLastUnstablei = true
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ prevHardSt = rd.HardState
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ prevSnapi = rd.Snapshot.Metadata.Index
+ }
+ if index := rd.appliedCursor(); index != 0 {
+ applyingToI = index
+ }
+
+ r.msgs = nil
+ r.readStates = nil
+ r.reduceUncommittedSize(rd.CommittedEntries)
+ advancec = n.advancec
+ case <-advancec:
+ if applyingToI != 0 {
+ r.raftLog.appliedTo(applyingToI)
+ applyingToI = 0
+ }
+ if havePrevLastUnstablei {
+ r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
+ havePrevLastUnstablei = false
+ }
+ r.raftLog.stableSnapTo(prevSnapi)
+ advancec = nil
+ case c := <-n.status:
+ c <- getStatus(r)
+ case <-n.stop:
+ close(n.done)
+ return
+ }
+ }
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+ select {
+ case n.tickc <- struct{}{}:
+ case <-n.done:
+ default:
+ n.logger.Warningf("A tick was dropped because the node was blocked for too long!")
+ }
+}
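+
+// Editor's note: illustrative only, not part of the upstream etcd sources.
+// The application drives this logical clock from its own timer, e.g.:
+//
+//	ticker := time.NewTicker(100 * time.Millisecond)
+//	defer ticker.Stop()
+//	for range ticker.C {
+//		n.Tick()
+//	}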
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+ return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+ // ignore unexpected local messages received over the network
+ if IsLocalMsg(m.Type) {
+ // TODO: return an error?
+ return nil
+ }
+ return n.step(ctx, m)
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
+ data, err := cc.Marshal()
+ if err != nil {
+ return err
+ }
+ return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
+}
+
+func (n *node) step(ctx context.Context, m pb.Message) error {
+ return n.stepWithWaitOption(ctx, m, false)
+}
+
+func (n *node) stepWait(ctx context.Context, m pb.Message) error {
+ return n.stepWithWaitOption(ctx, m, true)
+}
+
+ // stepWithWaitOption advances the state machine using the given message m,
+ // optionally waiting for the proposal result. ctx.Err() will be returned, if any.
+func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
+ if m.Type != pb.MsgProp {
+ select {
+ case n.recvc <- m:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+ }
+ ch := n.propc
+ pm := msgWithResult{m: m}
+ if wait {
+ pm.result = make(chan error, 1)
+ }
+ select {
+ case ch <- pm:
+ if !wait {
+ return nil
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+ select {
+ case rsp := <-pm.result:
+ if rsp != nil {
+ return rsp
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+ return nil
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+ select {
+ case n.advancec <- struct{}{}:
+ case <-n.done:
+ }
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+ var cs pb.ConfState
+ select {
+ case n.confc <- cc:
+ case <-n.done:
+ }
+ select {
+ case cs = <-n.confstatec:
+ case <-n.done:
+ }
+ return &cs
+}
+
+func (n *node) Status() Status {
+ c := make(chan Status)
+ select {
+ case n.status <- c:
+ return <-c
+ case <-n.done:
+ return Status{}
+ }
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+ select {
+ case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+ case <-n.done:
+ }
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+ rej := status == SnapshotFailure
+
+ select {
+ case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+ case <-n.done:
+ }
+}
+
+func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
+ select {
+ // manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
+ case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
+ case <-n.done:
+ case <-ctx.Done():
+ }
+}
+
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+ return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
+
+func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
+ rd := Ready{
+ Entries: r.raftLog.unstableEntries(),
+ CommittedEntries: r.raftLog.nextEnts(),
+ Messages: r.msgs,
+ }
+ if softSt := r.softState(); !softSt.equal(prevSoftSt) {
+ rd.SoftState = softSt
+ }
+ if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
+ rd.HardState = hardSt
+ }
+ if r.raftLog.unstable.snapshot != nil {
+ rd.Snapshot = *r.raftLog.unstable.snapshot
+ }
+ if len(r.readStates) != 0 {
+ rd.ReadStates = r.readStates
+ }
+ rd.MustSync = MustSync(r.hardState(), prevHardSt, len(rd.Entries))
+ return rd
+}
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+ // Persistent state on all servers:
+ // (Updated on stable storage before responding to RPCs)
+ // currentTerm
+ // votedFor
+ // log entries[]
+ return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
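+
+// Editor's note: illustrative examples, not part of the upstream etcd sources.
+//
+//	MustSync(pb.HardState{Term: 2, Vote: 1}, pb.HardState{Term: 1, Vote: 1}, 0) // true: term changed
+//	MustSync(pb.HardState{Term: 2, Vote: 1}, pb.HardState{Term: 2, Vote: 1}, 5) // true: new entries to append
+//	MustSync(pb.HardState{Term: 2, Vote: 1, Commit: 7}, pb.HardState{Term: 2, Vote: 1, Commit: 5}, 0) // false: commit-only change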
diff --git a/vendor/go.etcd.io/etcd/raft/progress.go b/vendor/go.etcd.io/etcd/raft/progress.go
new file mode 100644
index 0000000..ef3787d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/progress.go
@@ -0,0 +1,284 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import "fmt"
+
+const (
+ ProgressStateProbe ProgressStateType = iota
+ ProgressStateReplicate
+ ProgressStateSnapshot
+)
+
+type ProgressStateType uint64
+
+var prstmap = [...]string{
+ "ProgressStateProbe",
+ "ProgressStateReplicate",
+ "ProgressStateSnapshot",
+}
+
+func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
+
+// Progress represents a follower's progress in the view of the leader. The leader maintains
+// the progress of all followers, and sends entries to each follower based on its progress.
+type Progress struct {
+ Match, Next uint64
+ // State defines how the leader should interact with the follower.
+ //
+ // When in ProgressStateProbe, the leader sends at most one replication message
+ // per heartbeat interval. It also probes the actual progress of the follower.
+ //
+ // When in ProgressStateReplicate, the leader optimistically increases Next
+ // to the index of the latest entry sent after sending a replication message. This is
+ // an optimized state for fast replication of log entries to the follower.
+ //
+ // When in ProgressStateSnapshot, the leader has already sent out a snapshot
+ // and stops sending any replication messages.
+ State ProgressStateType
+
+ // Paused is used in ProgressStateProbe.
+ // When Paused is true, raft should pause sending replication message to this peer.
+ Paused bool
+ // PendingSnapshot is used in ProgressStateSnapshot.
+ // If there is a pending snapshot, PendingSnapshot will be set to the
+ // index of the snapshot. If PendingSnapshot is set, the replication process of
+ // this Progress will be paused. raft will not resend the snapshot until the pending one
+ // is reported to have failed.
+ PendingSnapshot uint64
+
+ // RecentActive is true if the progress is recently active. Receiving any messages
+ // from the corresponding follower indicates the progress is active.
+ // RecentActive can be reset to false after an election timeout.
+ RecentActive bool
+
+ // inflights is a sliding window for the inflight messages.
+ // Each inflight message contains one or more log entries.
+ // The max number of entries per message is defined in raft config as MaxSizePerMsg.
+ // Thus inflight effectively limits both the number of inflight messages
+ // and the bandwidth each Progress can use.
+ // When inflights is full, no more messages should be sent.
+ // When a leader sends out a message, the index of the last
+ // entry should be added to inflights. The index MUST be added
+ // into inflights in order.
+ // When a leader receives a reply, the previous inflights should
+ // be freed by calling inflights.freeTo with the index of the last
+ // received entry.
+ ins *inflights
+
+ // IsLearner is true if this progress is tracked for a learner.
+ IsLearner bool
+}
+
+func (pr *Progress) resetState(state ProgressStateType) {
+ pr.Paused = false
+ pr.PendingSnapshot = 0
+ pr.State = state
+ pr.ins.reset()
+}
+
+func (pr *Progress) becomeProbe() {
+ // If the original state is ProgressStateSnapshot, progress knows that
+ // the pending snapshot has been sent to this peer successfully, so it
+ // probes from pendingSnapshot + 1.
+ if pr.State == ProgressStateSnapshot {
+ pendingSnapshot := pr.PendingSnapshot
+ pr.resetState(ProgressStateProbe)
+ pr.Next = max(pr.Match+1, pendingSnapshot+1)
+ } else {
+ pr.resetState(ProgressStateProbe)
+ pr.Next = pr.Match + 1
+ }
+}
+
+func (pr *Progress) becomeReplicate() {
+ pr.resetState(ProgressStateReplicate)
+ pr.Next = pr.Match + 1
+}
+
+func (pr *Progress) becomeSnapshot(snapshoti uint64) {
+ pr.resetState(ProgressStateSnapshot)
+ pr.PendingSnapshot = snapshoti
+}
+
+// maybeUpdate returns false if the given index n comes from an outdated message.
+// Otherwise it updates the progress and returns true.
+func (pr *Progress) maybeUpdate(n uint64) bool {
+ var updated bool
+ if pr.Match < n {
+ pr.Match = n
+ updated = true
+ pr.resume()
+ }
+ if pr.Next < n+1 {
+ pr.Next = n + 1
+ }
+ return updated
+}
+
+func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// maybeDecrTo returns false if the rejected index comes from an out-of-order message.
+// Otherwise it decreases the progress's next index to min(rejected, last+1) and returns true.
+func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
+ if pr.State == ProgressStateReplicate {
+ // the rejection must be stale if the progress has matched and "rejected"
+ // is smaller than "match".
+ if rejected <= pr.Match {
+ return false
+ }
+ // directly decrease next to match + 1
+ pr.Next = pr.Match + 1
+ return true
+ }
+
+ // the rejection must be stale if "rejected" does not match next - 1
+ if pr.Next-1 != rejected {
+ return false
+ }
+
+ if pr.Next = min(rejected, last+1); pr.Next < 1 {
+ pr.Next = 1
+ }
+ pr.resume()
+ return true
+}
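+
+// Editor's note: a worked example, not part of the upstream etcd sources.
+// In probe state with Next=11, a rejection of the probe at index 10 with
+// RejectHint (the follower's last index) 7 decreases Next to min(10, 7+1) = 8:
+//
+//	pr := &Progress{State: ProgressStateProbe, Next: 11}
+//	pr.maybeDecrTo(10, 7) // returns true; pr.Next is now 8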
+
+func (pr *Progress) pause() { pr.Paused = true }
+func (pr *Progress) resume() { pr.Paused = false }
+
+// IsPaused returns whether sending log entries to this node has been
+// paused. A node may be paused because it has rejected recent
+// MsgApps, is currently waiting for a snapshot, or has reached the
+// MaxInflightMsgs limit.
+func (pr *Progress) IsPaused() bool {
+ switch pr.State {
+ case ProgressStateProbe:
+ return pr.Paused
+ case ProgressStateReplicate:
+ return pr.ins.full()
+ case ProgressStateSnapshot:
+ return true
+ default:
+ panic("unexpected state")
+ }
+}
+
+func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
+
+// needSnapshotAbort returns true if snapshot progress's Match
+// is equal or higher than the pendingSnapshot.
+func (pr *Progress) needSnapshotAbort() bool {
+ return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
+}
+
+func (pr *Progress) String() string {
+ return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
+}
+
+type inflights struct {
+ // the starting index in the buffer
+ start int
+ // number of inflights in the buffer
+ count int
+
+ // the size of the buffer
+ size int
+
+ // buffer contains the index of the last entry
+ // inside one message.
+ buffer []uint64
+}
+
+func newInflights(size int) *inflights {
+ return &inflights{
+ size: size,
+ }
+}
+
+// add adds an inflight into inflights
+func (in *inflights) add(inflight uint64) {
+ if in.full() {
+ panic("cannot add into a full inflights")
+ }
+ next := in.start + in.count
+ size := in.size
+ if next >= size {
+ next -= size
+ }
+ if next >= len(in.buffer) {
+ in.growBuf()
+ }
+ in.buffer[next] = inflight
+ in.count++
+}
+
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *inflights) growBuf() {
+ newSize := len(in.buffer) * 2
+ if newSize == 0 {
+ newSize = 1
+ } else if newSize > in.size {
+ newSize = in.size
+ }
+ newBuffer := make([]uint64, newSize)
+ copy(newBuffer, in.buffer)
+ in.buffer = newBuffer
+}
+
+// freeTo frees the inflights smaller than or equal to the given `to` flight.
+func (in *inflights) freeTo(to uint64) {
+ if in.count == 0 || to < in.buffer[in.start] {
+ // out of the left side of the window
+ return
+ }
+
+ idx := in.start
+ var i int
+ for i = 0; i < in.count; i++ {
+ if to < in.buffer[idx] { // found the first larger inflight
+ break
+ }
+
+ // increase index and maybe rotate
+ size := in.size
+ if idx++; idx >= size {
+ idx -= size
+ }
+ }
+ // free i inflights and set new start index
+ in.count -= i
+ in.start = idx
+ if in.count == 0 {
+ // inflights is empty, reset the start index so that we don't grow the
+ // buffer unnecessarily.
+ in.start = 0
+ }
+}
+
+func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
+
+// full returns true if the inflights is full.
+func (in *inflights) full() bool {
+ return in.count == in.size
+}
+
+// reset frees all inflights.
+func (in *inflights) reset() {
+ in.count = 0
+ in.start = 0
+}
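+
+// Editor's note: an illustrative sequence, not part of the upstream etcd
+// sources, showing how the leader uses the sliding window:
+//
+//	in := newInflights(3)
+//	in.add(5)    // sent a message whose last entry index is 5
+//	in.add(8)    // ... last entry index 8
+//	in.add(9)    // ... last entry index 9; the window is now full
+//	in.full()    // true: no further appends until an ack frees a slot
+//	in.freeTo(8) // ack up to index 8 frees the first two inflights
+//	in.full()    // false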
diff --git a/vendor/go.etcd.io/etcd/raft/raft.go b/vendor/go.etcd.io/etcd/raft/raft.go
new file mode 100644
index 0000000..e1e6a16
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/raft.go
@@ -0,0 +1,1575 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+// None is a placeholder node ID used when there is no leader.
+const None uint64 = 0
+const noLimit = math.MaxUint64
+
+// Possible values for StateType.
+const (
+ StateFollower StateType = iota
+ StateCandidate
+ StateLeader
+ StatePreCandidate
+ numStates
+)
+
+type ReadOnlyOption int
+
+const (
+ // ReadOnlySafe guarantees the linearizability of the read only request by
+ // communicating with the quorum. It is the default and suggested option.
+ ReadOnlySafe ReadOnlyOption = iota
+ // ReadOnlyLeaseBased ensures linearizability of the read only request by
+ // relying on the leader lease. It can be affected by clock drift.
+ // If the clock drift is unbounded, leader might keep the lease longer than it
+ // should (clock can move backward/pause without any bound). ReadIndex is not safe
+ // in that case.
+ ReadOnlyLeaseBased
+)
+
+// Possible values for CampaignType
+const (
+ // campaignPreElection represents the first phase of a normal election when
+ // Config.PreVote is true.
+ campaignPreElection CampaignType = "CampaignPreElection"
+ // campaignElection represents a normal (time-based) election (the second phase
+ // of the election when Config.PreVote is true).
+ campaignElection CampaignType = "CampaignElection"
+ // campaignTransfer represents the type of leader transfer
+ campaignTransfer CampaignType = "CampaignTransfer"
+)
+
+// ErrProposalDropped is returned when the proposal is ignored by some cases,
+// so that the proposer can be notified and fail fast.
+var ErrProposalDropped = errors.New("raft proposal dropped")
+
+// lockedRand is a small wrapper around rand.Rand to provide
+// synchronization among multiple raft groups. Only the methods needed
+// by the code are exposed (e.g. Intn).
+type lockedRand struct {
+ mu sync.Mutex
+ rand *rand.Rand
+}
+
+func (r *lockedRand) Intn(n int) int {
+ r.mu.Lock()
+ v := r.rand.Intn(n)
+ r.mu.Unlock()
+ return v
+}
+
+var globalRand = &lockedRand{
+ rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
+// CampaignType represents the type of campaigning.
+// The reason we use a string type instead of uint64
+// is that it is simpler to compare and to fill in raft entries.
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+ "StateFollower",
+ "StateCandidate",
+ "StateLeader",
+ "StatePreCandidate",
+}
+
+func (st StateType) String() string {
+ return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+ // ID is the identity of the local raft. ID cannot be 0.
+ ID uint64
+
+ // peers contains the IDs of all nodes (including self) in the raft cluster. It
+ // should only be set when starting a new raft cluster. Restarting raft from
+ // previous configuration will panic if peers is set. peers is private and only
+ // used for testing right now.
+ peers []uint64
+
+ // learners contains the IDs of all learner nodes (including self if the
+ // local node is a learner) in the raft cluster. Learners only receive
+ // entries from the leader node; they do not vote or promote themselves.
+ learners []uint64
+
+ // ElectionTick is the number of Node.Tick invocations that must pass between
+ // elections. That is, if a follower does not receive any message from the
+ // leader of current term before ElectionTick has elapsed, it will become
+ // candidate and start an election. ElectionTick must be greater than
+ // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+ // unnecessary leader switching.
+ ElectionTick int
+ // HeartbeatTick is the number of Node.Tick invocations that must pass between
+ // heartbeats. That is, a leader sends heartbeat messages to maintain its
+ // leadership every HeartbeatTick ticks.
+ HeartbeatTick int
+
+ // Storage is the storage for raft. raft generates entries and states to be
+ // stored in storage. raft reads the persisted entries and states out of
+ // Storage when it needs them. raft reads the previous state and configuration
+ // out of storage when restarting.
+ Storage Storage
+ // Applied is the last applied index. It should only be set when restarting
+ // raft. raft will not return entries to the application smaller than or equal to
+ // Applied. If Applied is unset when restarting, raft might return previously
+ // applied entries. This is a very application-dependent configuration.
+ Applied uint64
+
+ // MaxSizePerMsg limits the max byte size of each append message. A smaller
+ // value lowers the raft recovery cost (initial probing and message loss
+ // during normal operation). On the other hand, it might affect the
+ // throughput during normal replication. Note: math.MaxUint64 for unlimited,
+ // 0 for at most one entry per message.
+ MaxSizePerMsg uint64
+ // MaxCommittedSizePerReady limits the size of the committed entries which
+ // can be applied.
+ MaxCommittedSizePerReady uint64
+ // MaxUncommittedEntriesSize limits the aggregate byte size of the
+ // uncommitted entries that may be appended to a leader's log. Once this
+ // limit is exceeded, proposals will begin to return ErrProposalDropped
+ // errors. Note: 0 for no limit.
+ MaxUncommittedEntriesSize uint64
+ // MaxInflightMsgs limits the max number of in-flight append messages during
+ // the optimistic replication phase. The application transport layer usually
+ // has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+ // overflowing that sending buffer. TODO (xiangli): feedback to application to
+ // limit the proposal rate?
+ MaxInflightMsgs int
+
+ // CheckQuorum specifies if the leader should check quorum activity. Leader
+ // steps down when quorum is not active for an electionTimeout.
+ CheckQuorum bool
+
+ // PreVote enables the Pre-Vote algorithm described in raft thesis section
+ // 9.6. This prevents disruption when a node that has been partitioned away
+ // rejoins the cluster.
+ PreVote bool
+
+ // ReadOnlyOption specifies how the read only request is processed.
+ //
+ // ReadOnlySafe guarantees the linearizability of the read only request by
+ // communicating with the quorum. It is the default and suggested option.
+ //
+ // ReadOnlyLeaseBased ensures linearizability of the read only request by
+ // relying on the leader lease. It can be affected by clock drift.
+ // If the clock drift is unbounded, leader might keep the lease longer than it
+ // should (clock can move backward/pause without any bound). ReadIndex is not safe
+ // in that case.
+ // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
+ ReadOnlyOption ReadOnlyOption
+
+ // Logger is the logger used for raft logging. For multinode setups that can host
+ // multiple raft groups, each raft group can have its own logger.
+ Logger Logger
+
+ // DisableProposalForwarding, when set to true, means that followers will drop
+ // proposals rather than forwarding them to the leader. One use case for
+ // this feature would be in a situation where the Raft leader is used to
+ // compute the data of a proposal, for example, adding a timestamp from a
+ // hybrid logical clock to data in a monotonically increasing way. Forwarding
+ // should be disabled to prevent a follower with an inaccurate hybrid
+ // logical clock from assigning the timestamp and then forwarding the data
+ // to the leader.
+ DisableProposalForwarding bool
+}
+
+func (c *Config) validate() error {
+ if c.ID == None {
+ return errors.New("cannot use none as id")
+ }
+
+ if c.HeartbeatTick <= 0 {
+ return errors.New("heartbeat tick must be greater than 0")
+ }
+
+ if c.ElectionTick <= c.HeartbeatTick {
+ return errors.New("election tick must be greater than heartbeat tick")
+ }
+
+ if c.Storage == nil {
+ return errors.New("storage cannot be nil")
+ }
+
+ if c.MaxUncommittedEntriesSize == 0 {
+ c.MaxUncommittedEntriesSize = noLimit
+ }
+
+ // default MaxCommittedSizePerReady to MaxSizePerMsg because they were
+ // previously the same parameter.
+ if c.MaxCommittedSizePerReady == 0 {
+ c.MaxCommittedSizePerReady = c.MaxSizePerMsg
+ }
+
+ if c.MaxInflightMsgs <= 0 {
+ return errors.New("max inflight messages must be greater than 0")
+ }
+
+ if c.Logger == nil {
+ c.Logger = raftLogger
+ }
+
+ if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+ return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+ }
+
+ return nil
+}
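+
+// Editor's note: illustrative only, not part of the upstream etcd sources.
+// validate applies defaults and rejects lease-based reads without quorum checking:
+//
+//	c := &Config{ID: 1, ElectionTick: 10, HeartbeatTick: 1, Storage: NewMemoryStorage(),
+//		MaxInflightMsgs: 256, ReadOnlyOption: ReadOnlyLeaseBased}
+//	err := c.validate() // error: CheckQuorum must be enabled for ReadOnlyLeaseBased
+//	c.CheckQuorum = true
+//	err = c.validate() // nil; MaxUncommittedEntriesSize and Logger have been defaulted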
+
+type raft struct {
+ id uint64
+
+ Term uint64
+ Vote uint64
+
+ readStates []ReadState
+
+ // the log
+ raftLog *raftLog
+
+ maxMsgSize uint64
+ maxUncommittedSize uint64
+ maxInflight int
+ prs map[uint64]*Progress
+ learnerPrs map[uint64]*Progress
+ matchBuf uint64Slice
+
+ state StateType
+
+ // isLearner is true if the local raft node is a learner.
+ isLearner bool
+
+ votes map[uint64]bool
+
+ msgs []pb.Message
+
+ // the leader id
+ lead uint64
+ // leadTransferee is the id of the leader transfer target when its value is not zero.
+ // Follow the procedure defined in raft thesis 3.10.
+ leadTransferee uint64
+ // Only one conf change may be pending (in the log, but not yet
+ // applied) at a time. This is enforced via pendingConfIndex, which
+ // is set to a value >= the log index of the latest pending
+ // configuration change (if any). Config changes are only allowed to
+ // be proposed if the leader's applied index is greater than this
+ // value.
+ pendingConfIndex uint64
+ // an estimate of the size of the uncommitted tail of the Raft log. Used to
+ // prevent unbounded log growth. Only maintained by the leader. Reset on
+ // term changes.
+ uncommittedSize uint64
+
+ readOnly *readOnly
+
+ // electionElapsed is the number of ticks since reaching the last electionTimeout
+ // when it is leader or candidate, and the number of ticks since reaching the last
+ // electionTimeout or receiving a valid message from the current leader when it is
+ // a follower.
+ electionElapsed int
+
+ // number of ticks since it reached last heartbeatTimeout.
+ // only leader keeps heartbeatElapsed.
+ heartbeatElapsed int
+
+ checkQuorum bool
+ preVote bool
+
+ heartbeatTimeout int
+ electionTimeout int
+ // randomizedElectionTimeout is a random number between
+ // [electiontimeout, 2 * electiontimeout - 1]. It gets reset
+ // when raft changes its state to follower or candidate.
+ randomizedElectionTimeout int
+ disableProposalForwarding bool
+
+ tick func()
+ step stepFunc
+
+ logger Logger
+}
+
+func newRaft(c *Config) *raft {
+ if err := c.validate(); err != nil {
+ panic(err.Error())
+ }
+ raftlog := newLogWithSize(c.Storage, c.Logger, c.MaxCommittedSizePerReady)
+ hs, cs, err := c.Storage.InitialState()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ peers := c.peers
+ learners := c.learners
+ if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
+ if len(peers) > 0 || len(learners) > 0 {
+ // TODO(bdarnell): the peers argument is always nil except in
+ // tests; the argument should be removed and these tests should be
+ // updated to specify their nodes through a snapshot.
+ panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
+ }
+ peers = cs.Nodes
+ learners = cs.Learners
+ }
+ r := &raft{
+ id: c.ID,
+ lead: None,
+ isLearner: false,
+ raftLog: raftlog,
+ maxMsgSize: c.MaxSizePerMsg,
+ maxInflight: c.MaxInflightMsgs,
+ maxUncommittedSize: c.MaxUncommittedEntriesSize,
+ prs: make(map[uint64]*Progress),
+ learnerPrs: make(map[uint64]*Progress),
+ electionTimeout: c.ElectionTick,
+ heartbeatTimeout: c.HeartbeatTick,
+ logger: c.Logger,
+ checkQuorum: c.CheckQuorum,
+ preVote: c.PreVote,
+ readOnly: newReadOnly(c.ReadOnlyOption),
+ disableProposalForwarding: c.DisableProposalForwarding,
+ }
+ for _, p := range peers {
+ r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
+ }
+ for _, p := range learners {
+ if _, ok := r.prs[p]; ok {
+ panic(fmt.Sprintf("node %x is in both learner and peer list", p))
+ }
+ r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
+ if r.id == p {
+ r.isLearner = true
+ }
+ }
+
+ if !isHardStateEqual(hs, emptyState) {
+ r.loadState(hs)
+ }
+ if c.Applied > 0 {
+ raftlog.appliedTo(c.Applied)
+ }
+ r.becomeFollower(r.Term, None)
+
+ var nodesStrs []string
+ for _, n := range r.nodes() {
+ nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+ }
+
+ r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+ r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
+ return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+ return pb.HardState{
+ Term: r.Term,
+ Vote: r.Vote,
+ Commit: r.raftLog.committed,
+ }
+}
+
+func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
+
+func (r *raft) nodes() []uint64 {
+ nodes := make([]uint64, 0, len(r.prs))
+ for id := range r.prs {
+ nodes = append(nodes, id)
+ }
+ sort.Sort(uint64Slice(nodes))
+ return nodes
+}
+
+func (r *raft) learnerNodes() []uint64 {
+ nodes := make([]uint64, 0, len(r.learnerPrs))
+ for id := range r.learnerPrs {
+ nodes = append(nodes, id)
+ }
+ sort.Sort(uint64Slice(nodes))
+ return nodes
+}
+
+// send fills in the sender and, where appropriate, the term of the message and
+// appends it to r.msgs, from where it is handed to the application in the next Ready.
+func (r *raft) send(m pb.Message) {
+ m.From = r.id
+ if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
+ if m.Term == 0 {
+ // All {pre-,}campaign messages need to have the term set when
+ // sending.
+ // - MsgVote: m.Term is the term the node is campaigning for,
+ // non-zero as we increment the term when campaigning.
+ // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+ // granted, non-zero for the same reason MsgVote is
+ // - MsgPreVote: m.Term is the term the node will campaign,
+ // non-zero as we use m.Term to indicate the next term we'll be
+ // campaigning for
+ // - MsgPreVoteResp: m.Term is the term received in the original
+ // MsgPreVote if the pre-vote was granted, non-zero for the
+ // same reasons MsgPreVote is
+ panic(fmt.Sprintf("term should be set when sending %s", m.Type))
+ }
+ } else {
+ if m.Term != 0 {
+ panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
+ }
+ // do not attach term to MsgProp, MsgReadIndex
+ // proposals are a way to forward to the leader and
+ // should be treated as local message.
+ // MsgReadIndex is also forwarded to leader.
+ if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+ m.Term = r.Term
+ }
+ }
+ r.msgs = append(r.msgs, m)
+}
+
+func (r *raft) getProgress(id uint64) *Progress {
+ if pr, ok := r.prs[id]; ok {
+ return pr
+ }
+
+ return r.learnerPrs[id]
+}
+
+// sendAppend sends an append RPC with new entries (if any) and the
+// current commit index to the given peer.
+func (r *raft) sendAppend(to uint64) {
+ r.maybeSendAppend(to, true)
+}
+
+// maybeSendAppend sends an append RPC with new entries to the given peer,
+// if necessary. Returns true if a message was sent. The sendIfEmpty
+// argument controls whether messages with no entries will be sent
+// ("empty" messages are useful to convey updated Commit indexes, but
+// are undesirable when we're sending multiple messages in a batch).
+func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool {
+ pr := r.getProgress(to)
+ if pr.IsPaused() {
+ return false
+ }
+ m := pb.Message{}
+ m.To = to
+
+ term, errt := r.raftLog.term(pr.Next - 1)
+ ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+ if len(ents) == 0 && !sendIfEmpty {
+ return false
+ }
+
+ if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
+ if !pr.RecentActive {
+ r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+ return false
+ }
+
+ m.Type = pb.MsgSnap
+ snapshot, err := r.raftLog.snapshot()
+ if err != nil {
+ if err == ErrSnapshotTemporarilyUnavailable {
+ r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+ return false
+ }
+ panic(err) // TODO(bdarnell)
+ }
+ if IsEmptySnap(snapshot) {
+ panic("need non-empty snapshot")
+ }
+ m.Snapshot = snapshot
+ sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+ r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+ r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+ pr.becomeSnapshot(sindex)
+ r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+ } else {
+ m.Type = pb.MsgApp
+ m.Index = pr.Next - 1
+ m.LogTerm = term
+ m.Entries = ents
+ m.Commit = r.raftLog.committed
+ if n := len(m.Entries); n != 0 {
+ switch pr.State {
+ // optimistically increase the next when in ProgressStateReplicate
+ case ProgressStateReplicate:
+ last := m.Entries[n-1].Index
+ pr.optimisticUpdate(last)
+ pr.ins.add(last)
+ case ProgressStateProbe:
+ pr.pause()
+ default:
+ r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
+ }
+ }
+ }
+ r.send(m)
+ return true
+}
+
+// sendHeartbeat sends a heartbeat RPC to the given peer.
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
+ // Attach the commit as min(to.matched, r.committed).
+ // When the leader sends out a heartbeat message,
+ // the receiver (follower) might not be matched with the leader
+ // or it might not have all the committed entries.
+ // The leader MUST NOT forward the follower's commit to
+ // an unmatched index.
+ commit := min(r.getProgress(to).Match, r.raftLog.committed)
+ m := pb.Message{
+ To: to,
+ Type: pb.MsgHeartbeat,
+ Commit: commit,
+ Context: ctx,
+ }
+
+ r.send(m)
+}
+
+func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
+ for id, pr := range r.prs {
+ f(id, pr)
+ }
+
+ for id, pr := range r.learnerPrs {
+ f(id, pr)
+ }
+}
+
+// bcastAppend sends RPCs with entries to all peers that are not up-to-date
+// according to the progress recorded in r.prs.
+func (r *raft) bcastAppend() {
+ r.forEachProgress(func(id uint64, _ *Progress) {
+ if id == r.id {
+ return
+ }
+
+ r.sendAppend(id)
+ })
+}
+
+// bcastHeartbeat sends RPCs without entries to all the peers.
+func (r *raft) bcastHeartbeat() {
+ lastCtx := r.readOnly.lastPendingRequestCtx()
+ if len(lastCtx) == 0 {
+ r.bcastHeartbeatWithCtx(nil)
+ } else {
+ r.bcastHeartbeatWithCtx([]byte(lastCtx))
+ }
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
+ r.forEachProgress(func(id uint64, _ *Progress) {
+ if id == r.id {
+ return
+ }
+ r.sendHeartbeat(id, ctx)
+ })
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if
+// the commit index changed (in which case the caller should call
+// r.bcastAppend).
+func (r *raft) maybeCommit() bool {
+ // Preserving matchBuf across calls is an optimization
+ // used to avoid allocating a new slice on each call.
+ if cap(r.matchBuf) < len(r.prs) {
+ r.matchBuf = make(uint64Slice, len(r.prs))
+ }
+ mis := r.matchBuf[:len(r.prs)]
+ idx := 0
+ for _, p := range r.prs {
+ mis[idx] = p.Match
+ idx++
+ }
+ sort.Sort(mis)
+ mci := mis[len(mis)-r.quorum()]
+ return r.raftLog.maybeCommit(mci, r.Term)
+}
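+
+// Editor's note: a worked example, not part of the upstream etcd sources.
+// With five voters whose Match indexes are [9, 7, 7, 5, 4], the sorted slice
+// is [4, 5, 7, 7, 9] and quorum() is 3, so mci = mis[5-3] = 7: the highest
+// index known to be replicated on a majority, which maybeCommit then asks the
+// raft log to commit at the current term.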
+
+func (r *raft) reset(term uint64) {
+ if r.Term != term {
+ r.Term = term
+ r.Vote = None
+ }
+ r.lead = None
+
+ r.electionElapsed = 0
+ r.heartbeatElapsed = 0
+ r.resetRandomizedElectionTimeout()
+
+ r.abortLeaderTransfer()
+
+ r.votes = make(map[uint64]bool)
+ r.forEachProgress(func(id uint64, pr *Progress) {
+ *pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
+ if id == r.id {
+ pr.Match = r.raftLog.lastIndex()
+ }
+ })
+
+ r.pendingConfIndex = 0
+ r.uncommittedSize = 0
+ r.readOnly = newReadOnly(r.readOnly.option)
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
+ li := r.raftLog.lastIndex()
+ for i := range es {
+ es[i].Term = r.Term
+ es[i].Index = li + 1 + uint64(i)
+ }
+ // Track the size of this uncommitted proposal.
+ if !r.increaseUncommittedSize(es) {
+ r.logger.Debugf(
+ "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal",
+ r.id,
+ )
+ // Drop the proposal.
+ return false
+ }
+ // use latest "last" index after truncate/append
+ li = r.raftLog.append(es...)
+ r.getProgress(r.id).maybeUpdate(li)
+ // Regardless of maybeCommit's return, our caller will call bcastAppend.
+ r.maybeCommit()
+ return true
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+ r.electionElapsed++
+
+ if r.promotable() && r.pastElectionTimeout() {
+ r.electionElapsed = 0
+ r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+ }
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+ r.heartbeatElapsed++
+ r.electionElapsed++
+
+ if r.electionElapsed >= r.electionTimeout {
+ r.electionElapsed = 0
+ if r.checkQuorum {
+ r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+ }
+ // If the current leader cannot transfer leadership within electionTimeout, it aborts the transfer and continues as leader.
+ if r.state == StateLeader && r.leadTransferee != None {
+ r.abortLeaderTransfer()
+ }
+ }
+
+ if r.state != StateLeader {
+ return
+ }
+
+ if r.heartbeatElapsed >= r.heartbeatTimeout {
+ r.heartbeatElapsed = 0
+ r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+ }
+}
+
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+ r.step = stepFollower
+ r.reset(term)
+ r.tick = r.tickElection
+ r.lead = lead
+ r.state = StateFollower
+ r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeCandidate() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateLeader {
+ panic("invalid transition [leader -> candidate]")
+ }
+ r.step = stepCandidate
+ r.reset(r.Term + 1)
+ r.tick = r.tickElection
+ r.Vote = r.id
+ r.state = StateCandidate
+ r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomePreCandidate() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateLeader {
+ panic("invalid transition [leader -> pre-candidate]")
+ }
+ // Becoming a pre-candidate changes our step functions and state,
+ // but doesn't change anything else. In particular it does not increase
+ // r.Term or change r.Vote.
+ r.step = stepCandidate
+ r.votes = make(map[uint64]bool)
+ r.tick = r.tickElection
+ r.lead = None
+ r.state = StatePreCandidate
+ r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateFollower {
+ panic("invalid transition [follower -> leader]")
+ }
+ r.step = stepLeader
+ r.reset(r.Term)
+ r.tick = r.tickHeartbeat
+ r.lead = r.id
+ r.state = StateLeader
+ // Followers enter replicate mode when they've been successfully probed
+ // (perhaps after having received a snapshot as a result). The leader is
+ // trivially in this state. Note that r.reset() has initialized this
+ // progress with the last index already.
+ r.prs[r.id].becomeReplicate()
+
+ // Conservatively set the pendingConfIndex to the last index in the
+ // log. There may or may not be a pending config change, but it's
+ // safe to delay any future proposals until we commit all our
+ // pending log entries, and scanning the entire tail of the log
+ // could be expensive.
+ r.pendingConfIndex = r.raftLog.lastIndex()
+
+ emptyEnt := pb.Entry{Data: nil}
+ if !r.appendEntry(emptyEnt) {
+ // This won't happen because we just called reset() above.
+ r.logger.Panic("empty entry was dropped")
+ }
+ // As a special case, don't count the initial empty entry towards the
+ // uncommitted log quota. This is because we want to preserve the
+ // behavior of allowing one entry larger than quota if the current
+ // usage is zero.
+ r.reduceUncommittedSize([]pb.Entry{emptyEnt})
+ r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
+func (r *raft) campaign(t CampaignType) {
+ var term uint64
+ var voteMsg pb.MessageType
+ if t == campaignPreElection {
+ r.becomePreCandidate()
+ voteMsg = pb.MsgPreVote
+ // PreVote RPCs are sent for the next term before we've incremented r.Term.
+ term = r.Term + 1
+ } else {
+ r.becomeCandidate()
+ voteMsg = pb.MsgVote
+ term = r.Term
+ }
+ if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
+ // We won the election after voting for ourselves (which must mean that
+ // this is a single-node cluster). Advance to the next state.
+ if t == campaignPreElection {
+ r.campaign(campaignElection)
+ } else {
+ r.becomeLeader()
+ }
+ return
+ }
+ for id := range r.prs {
+ if id == r.id {
+ continue
+ }
+ r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
+
+ var ctx []byte
+ if t == campaignTransfer {
+ ctx = []byte(t)
+ }
+ r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
+ }
+}
+
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
+ if v {
+ r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
+ } else {
+ r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
+ }
+ if _, ok := r.votes[id]; !ok {
+ r.votes[id] = v
+ }
+ for _, vv := range r.votes {
+ if vv {
+ granted++
+ }
+ }
+ return granted
+}
+
+func (r *raft) Step(m pb.Message) error {
+ // Handle the message term, which may result in our stepping down to a follower.
+ switch {
+ case m.Term == 0:
+ // local message
+ case m.Term > r.Term:
+ if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+ force := bytes.Equal(m.Context, []byte(campaignTransfer))
+ inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
+ if !force && inLease {
+ // If a server receives a RequestVote request within the minimum election timeout
+ // of hearing from a current leader, it does not update its term or grant its vote
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+ return nil
+ }
+ }
+ switch {
+ case m.Type == pb.MsgPreVote:
+ // Never change our term in response to a PreVote
+ case m.Type == pb.MsgPreVoteResp && !m.Reject:
+ // We send pre-vote requests with a term in our future. If the
+ // pre-vote is granted, we will increment our term when we get a
+ // quorum. If it is not, the term comes from the node that
+ // rejected our vote so we should become a follower at the new
+ // term.
+ default:
+ r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+ r.becomeFollower(m.Term, m.From)
+ } else {
+ r.becomeFollower(m.Term, None)
+ }
+ }
+
+ case m.Term < r.Term:
+ if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+ // We have received messages from a leader at a lower term. It is possible
+ // that these messages were simply delayed in the network, but this could
+ // also mean that this node has advanced its term number during a network
+ // partition, and it is now unable to either win an election or to rejoin
+ // the majority on the old term. If checkQuorum is false, this will be
+ // handled by incrementing term numbers in response to MsgVote with a
+ // higher term, but if checkQuorum is true we may not advance the term on
+ // MsgVote and must generate other messages to advance the term. The net
+ // result of these two features is to minimize the disruption caused by
+ // nodes that have been removed from the cluster's configuration: a
+ // removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+ // but it will not receive MsgApp or MsgHeartbeat, so it will not create
+ // disruptive term increases. The MsgAppResp sent below notifies the leader
+ // of this node's activeness. The above also applies to Pre-Vote.
+ //
+ // When a follower gets isolated, it soon starts an election and ends
+ // up with a higher term than the leader, although it won't receive enough
+ // votes to win the election. When it regains connectivity, its response
+ // with "pb.MsgAppResp" at the higher term would force the leader to step down.
+ // However, this disruption is unavoidable: it is needed to free the stuck node
+ // with a fresh election. It can be prevented with the Pre-Vote phase.
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
+ } else if m.Type == pb.MsgPreVote {
+ // Before Pre-Vote was enabled, there may have been a candidate with a higher term
+ // but a shorter log. After upgrading to Pre-Vote, the cluster may deadlock if
+ // we drop messages with a lower term.
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
+ } else {
+ // ignore other cases
+ r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ }
+ return nil
+ }
+
+ switch m.Type {
+ case pb.MsgHup:
+ if r.state != StateLeader {
+ ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
+ if err != nil {
+ r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
+ }
+ if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
+ r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
+ return nil
+ }
+
+ r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+ if r.preVote {
+ r.campaign(campaignPreElection)
+ } else {
+ r.campaign(campaignElection)
+ }
+ } else {
+ r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+ }
+
+ case pb.MsgVote, pb.MsgPreVote:
+ if r.isLearner {
+ // TODO: learner may need to vote, in case of node down when confchange.
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ return nil
+ }
+ // We can vote if this is a repeat of a vote we've already cast...
+ canVote := r.Vote == m.From ||
+ // ...we haven't voted and we don't think there's a leader yet in this term...
+ (r.Vote == None && r.lead == None) ||
+ // ...or this is a PreVote for a future term...
+ (m.Type == pb.MsgPreVote && m.Term > r.Term)
+ // ...and we believe the candidate is up to date.
+ if canVote && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ // When responding to Msg{Pre,}Vote messages we include the term
+ // from the message, not the local term. To see why, consider the
+ // case where a single node was previously partitioned away and
+ // its local term is now out of date. If we include the local term
+ // (recall that for pre-votes we don't update the local term), the
+ // (pre-)campaigning node on the other end will proceed to ignore
+ // the message (it ignores all out of date messages).
+ // The term in the original message and current local term are the
+ // same in the case of regular votes, but different for pre-votes.
+ r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+ if m.Type == pb.MsgVote {
+ // Only record real votes.
+ r.electionElapsed = 0
+ r.Vote = m.From
+ }
+ } else {
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+ r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
+ }
+
+ default:
+ err := r.step(r, m)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type stepFunc func(r *raft, m pb.Message) error
+
+func stepLeader(r *raft, m pb.Message) error {
+ // These message types do not require any progress for m.From.
+ switch m.Type {
+ case pb.MsgBeat:
+ r.bcastHeartbeat()
+ return nil
+ case pb.MsgCheckQuorum:
+ if !r.checkQuorumActive() {
+ r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+ r.becomeFollower(r.Term, None)
+ }
+ return nil
+ case pb.MsgProp:
+ if len(m.Entries) == 0 {
+ r.logger.Panicf("%x stepped empty MsgProp", r.id)
+ }
+ if _, ok := r.prs[r.id]; !ok {
+ // If we are not currently a member of the range (i.e. this node
+ // was removed from the configuration while serving as leader),
+ // drop any new proposals.
+ return ErrProposalDropped
+ }
+ if r.leadTransferee != None {
+ r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+ return ErrProposalDropped
+ }
+
+ for i, e := range m.Entries {
+ if e.Type == pb.EntryConfChange {
+ if r.pendingConfIndex > r.raftLog.applied {
+ r.logger.Infof("propose conf %s ignored since pending unapplied configuration [index %d, applied %d]",
+ e.String(), r.pendingConfIndex, r.raftLog.applied)
+ m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+ } else {
+ r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
+ }
+ }
+ }
+
+ if !r.appendEntry(m.Entries...) {
+ return ErrProposalDropped
+ }
+ r.bcastAppend()
+ return nil
+ case pb.MsgReadIndex:
+ if r.quorum() > 1 {
+ if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
+ // Reject read only request when this leader has not committed any log entry at its term.
+ return nil
+ }
+
+ // thinking: use an internally defined context instead of the user-given context.
+ // We can express this in terms of the term and index instead of a user-supplied value.
+ // This would allow multiple reads to piggyback on the same message.
+ switch r.readOnly.option {
+ case ReadOnlySafe:
+ r.readOnly.addRequest(r.raftLog.committed, m)
+ r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+ case ReadOnlyLeaseBased:
+ ri := r.raftLog.committed
+ if m.From == None || m.From == r.id { // from local member
+ r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+ } else {
+ r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+ }
+ }
+ } else {
+ r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+ }
+
+ return nil
+ }
+
+ // All other message types require a progress for m.From (pr).
+ pr := r.getProgress(m.From)
+ if pr == nil {
+ r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+ return nil
+ }
+ switch m.Type {
+ case pb.MsgAppResp:
+ pr.RecentActive = true
+
+ if m.Reject {
+ r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+ r.id, m.RejectHint, m.From, m.Index)
+ if pr.maybeDecrTo(m.Index, m.RejectHint) {
+ r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+ if pr.State == ProgressStateReplicate {
+ pr.becomeProbe()
+ }
+ r.sendAppend(m.From)
+ }
+ } else {
+ oldPaused := pr.IsPaused()
+ if pr.maybeUpdate(m.Index) {
+ switch {
+ case pr.State == ProgressStateProbe:
+ pr.becomeReplicate()
+ case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
+ r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ // Transition back to replicating state via probing state
+ // (which takes the snapshot into account). If we didn't
+ // move to replicating state, that would only happen with
+ // the next round of appends (but there may not be a next
+ // round for a while, exposing an inconsistent RaftStatus).
+ pr.becomeProbe()
+ pr.becomeReplicate()
+ case pr.State == ProgressStateReplicate:
+ pr.ins.freeTo(m.Index)
+ }
+
+ if r.maybeCommit() {
+ r.bcastAppend()
+ } else if oldPaused {
+ // If we were paused before, this node may be missing the
+ // latest commit index, so send it.
+ r.sendAppend(m.From)
+ }
+ // We've updated flow control information above, which may
+ // allow us to send multiple (size-limited) in-flight messages
+ // at once (such as when transitioning from probe to
+ // replicate, or when freeTo() covers multiple messages). If
+ // we have more entries to send, send as many messages as we
+ // can (without sending empty messages for the commit index)
+ for r.maybeSendAppend(m.From, false) {
+ }
+ // Transfer leadership is in progress.
+ if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+ r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
+ r.sendTimeoutNow(m.From)
+ }
+ }
+ }
+ case pb.MsgHeartbeatResp:
+ pr.RecentActive = true
+ pr.resume()
+
+ // free one slot for the full inflights window to allow progress.
+ if pr.State == ProgressStateReplicate && pr.ins.full() {
+ pr.ins.freeFirstOne()
+ }
+ if pr.Match < r.raftLog.lastIndex() {
+ r.sendAppend(m.From)
+ }
+
+ if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+ return nil
+ }
+
+ ackCount := r.readOnly.recvAck(m)
+ if ackCount < r.quorum() {
+ return nil
+ }
+
+ rss := r.readOnly.advance(m)
+ for _, rs := range rss {
+ req := rs.req
+ if req.From == None || req.From == r.id { // from local member
+ r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
+ } else {
+ r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
+ }
+ }
+ case pb.MsgSnapStatus:
+ if pr.State != ProgressStateSnapshot {
+ return nil
+ }
+ if !m.Reject {
+ pr.becomeProbe()
+ r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ } else {
+ pr.snapshotFailure()
+ pr.becomeProbe()
+ r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ }
+ // If the snapshot succeeded, wait for the msgAppResp from the remote node before
+ // sending out the next msgApp.
+ // If the snapshot failed, wait for a heartbeat interval before the next attempt.
+ pr.pause()
+ case pb.MsgUnreachable:
+ // During optimistic replication, if the remote becomes unreachable,
+ // there is a high probability that a MsgApp was lost.
+ if pr.State == ProgressStateReplicate {
+ pr.becomeProbe()
+ }
+ r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+ case pb.MsgTransferLeader:
+ if pr.IsLearner {
+ r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+ return nil
+ }
+ leadTransferee := m.From
+ lastLeadTransferee := r.leadTransferee
+ if lastLeadTransferee != None {
+ if lastLeadTransferee == leadTransferee {
+ r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
+ r.id, r.Term, leadTransferee, leadTransferee)
+ return nil
+ }
+ r.abortLeaderTransfer()
+ r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
+ }
+ if leadTransferee == r.id {
+ r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+ return nil
+ }
+ // Transfer leadership to third party.
+ r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+ // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
+ r.electionElapsed = 0
+ r.leadTransferee = leadTransferee
+ if pr.Match == r.raftLog.lastIndex() {
+ r.sendTimeoutNow(leadTransferee)
+ r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+ } else {
+ r.sendAppend(leadTransferee)
+ }
+ }
+ return nil
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) error {
+ // Only handle vote responses corresponding to our candidacy (while in
+ // StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+ // our pre-candidate state).
+ var myVoteRespType pb.MessageType
+ if r.state == StatePreCandidate {
+ myVoteRespType = pb.MsgPreVoteResp
+ } else {
+ myVoteRespType = pb.MsgVoteResp
+ }
+ switch m.Type {
+ case pb.MsgProp:
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return ErrProposalDropped
+ case pb.MsgApp:
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+ r.handleAppendEntries(m)
+ case pb.MsgHeartbeat:
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+ r.handleHeartbeat(m)
+ case pb.MsgSnap:
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+ r.handleSnapshot(m)
+ case myVoteRespType:
+ gr := r.poll(m.From, m.Type, !m.Reject)
+ r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
+ switch r.quorum() {
+ case gr:
+ if r.state == StatePreCandidate {
+ r.campaign(campaignElection)
+ } else {
+ r.becomeLeader()
+ r.bcastAppend()
+ }
+ case len(r.votes) - gr:
+ // pb.MsgPreVoteResp contains the future term of the pre-candidate
+ // (m.Term > r.Term); reuse r.Term here.
+ r.becomeFollower(r.Term, None)
+ }
+ case pb.MsgTimeoutNow:
+ r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
+ }
+ return nil
+}
+
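+ // stepFollower handles messages received in StateFollower: proposals, leader-transfer
+ // and read-index requests are forwarded to the current leader when one is known, while
+ // append, heartbeat and snapshot messages reset the election timer and are processed locally.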
+func stepFollower(r *raft, m pb.Message) error {
+ switch m.Type {
+ case pb.MsgProp:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return ErrProposalDropped
+ } else if r.disableProposalForwarding {
+ r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+ return ErrProposalDropped
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgApp:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleAppendEntries(m)
+ case pb.MsgHeartbeat:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleHeartbeat(m)
+ case pb.MsgSnap:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleSnapshot(m)
+ case pb.MsgTransferLeader:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
+ return nil
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgTimeoutNow:
+ if r.promotable() {
+ r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+ // Leadership transfers never use pre-vote even if r.preVote is true; we
+ // know we are not recovering from a partition so there is no need for the
+ // extra round trip.
+ r.campaign(campaignTransfer)
+ } else {
+ r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
+ }
+ case pb.MsgReadIndex:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+ return nil
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgReadIndexResp:
+ if len(m.Entries) != 1 {
+ r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+ return nil
+ }
+ r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+ }
+ return nil
+}
+
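+ // handleAppendEntries tries to append the entries carried by a MsgApp to the local log.
+ // It replies with the new last index on success, the committed index for stale messages,
+ // or a rejection that carries the local last index as a hint for the leader.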
+func (r *raft) handleAppendEntries(m pb.Message) {
+ if m.Index < r.raftLog.committed {
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+ return
+ }
+
+ if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+ } else {
+ r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+ r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+ }
+}
+
+func (r *raft) handleHeartbeat(m pb.Message) {
+ r.raftLog.commitTo(m.Commit)
+ r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
+func (r *raft) handleSnapshot(m pb.Message) {
+ sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+ if r.restore(m.Snapshot) {
+ r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, sindex, sterm)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+ } else {
+ r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, sindex, sterm)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+ }
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+ // configuration of the state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+ if s.Metadata.Index <= r.raftLog.committed {
+ return false
+ }
+ if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+ r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+ r.raftLog.commitTo(s.Metadata.Index)
+ return false
+ }
+
+ // A normal (voting) peer can't become a learner.
+ if !r.isLearner {
+ for _, id := range s.Metadata.ConfState.Learners {
+ if id == r.id {
+ r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
+ return false
+ }
+ }
+ }
+
+ r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+ r.raftLog.restore(s)
+ r.prs = make(map[uint64]*Progress)
+ r.learnerPrs = make(map[uint64]*Progress)
+ r.restoreNode(s.Metadata.ConfState.Nodes, false)
+ r.restoreNode(s.Metadata.ConfState.Learners, true)
+ return true
+}
+
+func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
+ for _, n := range nodes {
+ match, next := uint64(0), r.raftLog.lastIndex()+1
+ if n == r.id {
+ match = next - 1
+ r.isLearner = isLearner
+ }
+ r.setProgress(n, match, next, isLearner)
+ r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
+ }
+}
+
+ // promotable indicates whether the state machine can be promoted to leader,
+ // which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+ _, ok := r.prs[r.id]
+ return ok
+}
+
+func (r *raft) addNode(id uint64) {
+ r.addNodeOrLearnerNode(id, false)
+}
+
+func (r *raft) addLearner(id uint64) {
+ r.addNodeOrLearnerNode(id, true)
+}
+
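+ // addNodeOrLearnerNode adds id as a voter or learner: unknown ids get a fresh progress,
+ // a known learner may be promoted to voter, voter-to-learner changes are ignored, and an
+ // added peer is marked recently active so CheckQuorum does not step the leader down early.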
+func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
+ pr := r.getProgress(id)
+ if pr == nil {
+ r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
+ } else {
+ if isLearner && !pr.IsLearner {
+ // can only change Learner to Voter
+ r.logger.Infof("%x ignored addLearner: do not support changing %x from raft peer to learner.", r.id, id)
+ return
+ }
+
+ if isLearner == pr.IsLearner {
+ // Ignore any redundant addNode calls (which can happen because the
+ // initial bootstrapping entries are applied twice).
+ return
+ }
+
+ // Change Learner to Voter; reuse the original Learner progress.
+ delete(r.learnerPrs, id)
+ pr.IsLearner = false
+ r.prs[id] = pr
+ }
+
+ if r.id == id {
+ r.isLearner = isLearner
+ }
+
+ // When a node is first added, we should mark it as recently active.
+ // Otherwise, CheckQuorum may cause us to step down if it is invoked
+ // before the added node has a chance to communicate with us.
+ pr = r.getProgress(id)
+ pr.RecentActive = true
+}
+
+func (r *raft) removeNode(id uint64) {
+ r.delProgress(id)
+
+ // Do not try to commit or abort a leadership transfer if there are no nodes left in the cluster.
+ if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
+ return
+ }
+
+ // The quorum size is now smaller, so see if any pending entries can
+ // be committed.
+ if r.maybeCommit() {
+ r.bcastAppend()
+ }
+ // If the removed node is the leadTransferee, then abort the leadership transfer.
+ if r.state == StateLeader && r.leadTransferee == id {
+ r.abortLeaderTransfer()
+ }
+}
+
+func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
+ if !isLearner {
+ delete(r.learnerPrs, id)
+ r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+ return
+ }
+
+ if _, ok := r.prs[id]; ok {
+ panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
+ }
+ r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
+}
+
+func (r *raft) delProgress(id uint64) {
+ delete(r.prs, id)
+ delete(r.learnerPrs, id)
+}
+
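+ // loadState restores Term, Vote and the commit index from a persisted HardState,
+ // panicking if the commit index falls outside the bounds of the local log.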
+func (r *raft) loadState(state pb.HardState) {
+ if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+ r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+ }
+ r.raftLog.committed = state.Commit
+ r.Term = state.Term
+ r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true iff r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+ return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
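+ // resetRandomizedElectionTimeout picks a new timeout uniformly at random from
+ // [electiontimeout, 2 * electiontimeout - 1] to reduce the chance of split votes.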
+func (r *raft) resetRandomizedElectionTimeout() {
+ r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
+func (r *raft) checkQuorumActive() bool {
+ var act int
+
+ r.forEachProgress(func(id uint64, pr *Progress) {
+ if id == r.id { // self is always active
+ act++
+ return
+ }
+
+ if pr.RecentActive && !pr.IsLearner {
+ act++
+ }
+
+ pr.RecentActive = false
+ })
+
+ return act >= r.quorum()
+}
+
+func (r *raft) sendTimeoutNow(to uint64) {
+ r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
+}
+
+func (r *raft) abortLeaderTransfer() {
+ r.leadTransferee = None
+}
+
+// increaseUncommittedSize computes the size of the proposed entries and
+ // determines whether they would push the leader over its maxUncommittedSize limit.
+// If the new entries would exceed the limit, the method returns false. If not,
+// the increase in uncommitted entry size is recorded and the method returns
+// true.
+func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool {
+ var s uint64
+ for _, e := range ents {
+ s += uint64(PayloadSize(e))
+ }
+
+ if r.uncommittedSize > 0 && r.uncommittedSize+s > r.maxUncommittedSize {
+ // If the uncommitted tail of the Raft log is empty, allow any size
+ // proposal. Otherwise, limit the size of the uncommitted tail of the
+ // log and drop any proposal that would push the size over the limit.
+ return false
+ }
+ r.uncommittedSize += s
+ return true
+}
+
+// reduceUncommittedSize accounts for the newly committed entries by decreasing
+ // the tracked uncommitted entry size.
+func (r *raft) reduceUncommittedSize(ents []pb.Entry) {
+ if r.uncommittedSize == 0 {
+ // Fast-path for followers, who do not track or enforce the limit.
+ return
+ }
+
+ var s uint64
+ for _, e := range ents {
+ s += uint64(PayloadSize(e))
+ }
+ if s > r.uncommittedSize {
+ // uncommittedSize may underestimate the size of the uncommitted Raft
+ // log tail but will never overestimate it. Saturate at 0 instead of
+ // allowing overflow.
+ r.uncommittedSize = 0
+ } else {
+ r.uncommittedSize -= s
+ }
+}
+
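+ // numOfPendingConf returns the number of EntryConfChange entries in ents.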
+func numOfPendingConf(ents []pb.Entry) int {
+ n := 0
+ for i := range ents {
+ if ents[i].Type == pb.EntryConfChange {
+ n++
+ }
+ }
+ return n
+}
diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go
new file mode 100644
index 0000000..fd9ee37
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.pb.go
@@ -0,0 +1,2004 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft.proto
+
+/*
+ Package raftpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ raft.proto
+
+ It has these top-level messages:
+ Entry
+ SnapshotMetadata
+ Snapshot
+ Message
+ HardState
+ ConfState
+ ConfChange
+*/
+package raftpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type EntryType int32
+
+const (
+ EntryNormal EntryType = 0
+ EntryConfChange EntryType = 1
+)
+
+var EntryType_name = map[int32]string{
+ 0: "EntryNormal",
+ 1: "EntryConfChange",
+}
+var EntryType_value = map[string]int32{
+ "EntryNormal": 0,
+ "EntryConfChange": 1,
+}
+
+func (x EntryType) Enum() *EntryType {
+ p := new(EntryType)
+ *p = x
+ return p
+}
+func (x EntryType) String() string {
+ return proto.EnumName(EntryType_name, int32(x))
+}
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+ if err != nil {
+ return err
+ }
+ *x = EntryType(value)
+ return nil
+}
+func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} }
+
+type MessageType int32
+
+const (
+ MsgHup MessageType = 0
+ MsgBeat MessageType = 1
+ MsgProp MessageType = 2
+ MsgApp MessageType = 3
+ MsgAppResp MessageType = 4
+ MsgVote MessageType = 5
+ MsgVoteResp MessageType = 6
+ MsgSnap MessageType = 7
+ MsgHeartbeat MessageType = 8
+ MsgHeartbeatResp MessageType = 9
+ MsgUnreachable MessageType = 10
+ MsgSnapStatus MessageType = 11
+ MsgCheckQuorum MessageType = 12
+ MsgTransferLeader MessageType = 13
+ MsgTimeoutNow MessageType = 14
+ MsgReadIndex MessageType = 15
+ MsgReadIndexResp MessageType = 16
+ MsgPreVote MessageType = 17
+ MsgPreVoteResp MessageType = 18
+)
+
+var MessageType_name = map[int32]string{
+ 0: "MsgHup",
+ 1: "MsgBeat",
+ 2: "MsgProp",
+ 3: "MsgApp",
+ 4: "MsgAppResp",
+ 5: "MsgVote",
+ 6: "MsgVoteResp",
+ 7: "MsgSnap",
+ 8: "MsgHeartbeat",
+ 9: "MsgHeartbeatResp",
+ 10: "MsgUnreachable",
+ 11: "MsgSnapStatus",
+ 12: "MsgCheckQuorum",
+ 13: "MsgTransferLeader",
+ 14: "MsgTimeoutNow",
+ 15: "MsgReadIndex",
+ 16: "MsgReadIndexResp",
+ 17: "MsgPreVote",
+ 18: "MsgPreVoteResp",
+}
+var MessageType_value = map[string]int32{
+ "MsgHup": 0,
+ "MsgBeat": 1,
+ "MsgProp": 2,
+ "MsgApp": 3,
+ "MsgAppResp": 4,
+ "MsgVote": 5,
+ "MsgVoteResp": 6,
+ "MsgSnap": 7,
+ "MsgHeartbeat": 8,
+ "MsgHeartbeatResp": 9,
+ "MsgUnreachable": 10,
+ "MsgSnapStatus": 11,
+ "MsgCheckQuorum": 12,
+ "MsgTransferLeader": 13,
+ "MsgTimeoutNow": 14,
+ "MsgReadIndex": 15,
+ "MsgReadIndexResp": 16,
+ "MsgPreVote": 17,
+ "MsgPreVoteResp": 18,
+}
+
+func (x MessageType) Enum() *MessageType {
+ p := new(MessageType)
+ *p = x
+ return p
+}
+func (x MessageType) String() string {
+ return proto.EnumName(MessageType_name, int32(x))
+}
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+ if err != nil {
+ return err
+ }
+ *x = MessageType(value)
+ return nil
+}
+func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} }
+
+type ConfChangeType int32
+
+const (
+ ConfChangeAddNode ConfChangeType = 0
+ ConfChangeRemoveNode ConfChangeType = 1
+ ConfChangeUpdateNode ConfChangeType = 2
+ ConfChangeAddLearnerNode ConfChangeType = 3
+)
+
+var ConfChangeType_name = map[int32]string{
+ 0: "ConfChangeAddNode",
+ 1: "ConfChangeRemoveNode",
+ 2: "ConfChangeUpdateNode",
+ 3: "ConfChangeAddLearnerNode",
+}
+var ConfChangeType_value = map[string]int32{
+ "ConfChangeAddNode": 0,
+ "ConfChangeRemoveNode": 1,
+ "ConfChangeUpdateNode": 2,
+ "ConfChangeAddLearnerNode": 3,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+ p := new(ConfChangeType)
+ *p = x
+ return p
+}
+func (x ConfChangeType) String() string {
+ return proto.EnumName(ConfChangeType_name, int32(x))
+}
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+ if err != nil {
+ return err
+ }
+ *x = ConfChangeType(value)
+ return nil
+}
+func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} }
+
+type Entry struct {
+ Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
+ Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
+ Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+ Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} }
+
+type SnapshotMetadata struct {
+ ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
+ Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage() {}
+func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} }
+
+type Snapshot struct {
+ Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} }
+
+type Message struct {
+ Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+ To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
+ From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
+ Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
+ LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+ Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
+ Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+ Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
+ Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
+ Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
+ RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+ Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} }
+
+type HardState struct {
+ Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
+ Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
+ Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *HardState) Reset() { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage() {}
+func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} }
+
+type ConfState struct {
+ Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+ Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConfState) Reset() { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage() {}
+func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} }
+
+type ConfChange struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+ Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
+ NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
+ Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ConfChange) Reset() { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage() {}
+func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} }
+
+func init() {
+ proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+ proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+ proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+ proto.RegisterType((*Message)(nil), "raftpb.Message")
+ proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+ proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+ proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+ proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+ proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+ proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+}
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+ if m.Data != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+ i += copy(dAtA[i:], m.Data)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size()))
+ n1, err := m.ConfState.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Data != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+ i += copy(dAtA[i:], m.Data)
+ }
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size()))
+ n2, err := m.Metadata.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Message) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Message) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.To))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.From))
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ dAtA[i] = 0x28
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
+ dAtA[i] = 0x30
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+ if len(m.Entries) > 0 {
+ for _, msg := range m.Entries {
+ dAtA[i] = 0x3a
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ dAtA[i] = 0x40
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+ dAtA[i] = 0x4a
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size()))
+ n3, err := m.Snapshot.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ dAtA[i] = 0x50
+ i++
+ if m.Reject {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ dAtA[i] = 0x58
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
+ if m.Context != nil {
+ dAtA[i] = 0x62
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+ i += copy(dAtA[i:], m.Context)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *HardState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *ConfState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, num := range m.Nodes {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(num))
+ }
+ }
+ if len(m.Learners) > 0 {
+ for _, num := range m.Learners {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(num))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *ConfChange) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.ID))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+ if m.Context != nil {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+ i += copy(dAtA[i:], m.Context)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Entry) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.Index))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+ var l int
+ _ = l
+ l = m.ConfState.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ n += 1 + sovRaft(uint64(m.Index))
+ n += 1 + sovRaft(uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ l = m.Metadata.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Message) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.To))
+ n += 1 + sovRaft(uint64(m.From))
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.LogTerm))
+ n += 1 + sovRaft(uint64(m.Index))
+ if len(m.Entries) > 0 {
+ for _, e := range m.Entries {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ n += 1 + sovRaft(uint64(m.Commit))
+ l = m.Snapshot.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ n += 2
+ n += 1 + sovRaft(uint64(m.RejectHint))
+ if m.Context != nil {
+ l = len(m.Context)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *HardState) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.Vote))
+ n += 1 + sovRaft(uint64(m.Commit))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ConfState) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Nodes) > 0 {
+ for _, e := range m.Nodes {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ if len(m.Learners) > 0 {
+ for _, e := range m.Learners {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ConfChange) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.ID))
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.NodeID))
+ if m.Context != nil {
+ l = len(m.Context)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovRaft(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRaft(x uint64) (n int) {
+ return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Entry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (EntryType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Message) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Message: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (MessageType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ m.To = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.To |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ m.From = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.From |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+ }
+ m.LogTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LogTerm |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Entries = append(m.Entries, Entry{})
+ if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ m.Commit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Commit |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Reject = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+ }
+ m.RejectHint = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RejectHint |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+ if m.Context == nil {
+ m.Context = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HardState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+ }
+ m.Vote = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Vote |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ m.Commit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Commit |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Nodes = append(m.Nodes, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Nodes = append(m.Nodes, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+ }
+ case 2:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Learners = append(m.Learners, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Learners = append(m.Learners, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfChange) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (ConfChangeType(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NodeID |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+ if m.Context == nil {
+ m.Context = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRaft(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRaft(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) }
+
+var fileDescriptorRaft = []byte{
+ // 815 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
+ 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
+ 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
+ 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
+ 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
+ 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xef, 0xab, 0xae, 0xfa, 0xea, 0xeb, 0x9a, 0x01, 0x50, 0x62, 0xa9,
+ 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
+ 0x9b, 0x53, 0xc1, 0x4e, 0x7e, 0x83, 0xd6, 0xb7, 0xa9, 0x56, 0x77, 0xf8, 0x19, 0x04, 0x57, 0x77,
+ 0x19, 0x71, 0x6f, 0xec, 0x4d, 0x07, 0xc7, 0x7b, 0x47, 0xc5, 0xad, 0x23, 0x4b, 0x1a, 0xe2, 0x24,
+ 0xb8, 0xff, 0xe7, 0x93, 0xc6, 0xdc, 0x26, 0x21, 0x87, 0xe0, 0x8a, 0x54, 0xc2, 0xfd, 0xb1, 0x37,
+ 0x0d, 0x36, 0x0c, 0xa9, 0x04, 0xf7, 0xa1, 0x75, 0x91, 0x46, 0xf4, 0x8e, 0x37, 0x2b, 0x54, 0x01,
+ 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, 0x03,
+ 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, 0x42,
+ 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xa1, 0x28, 0xdc, 0x2a, 0x3a, 0x95, 0xe9, 0xf2, 0xb5, 0x21,
+ 0x5c, 0xf1, 0xde, 0xa2, 0x04, 0x4c, 0xf3, 0x95, 0x6d, 0x5e, 0xd5, 0x55, 0x40, 0x46, 0xb2, 0x36,
+ 0x92, 0xab, 0xba, 0x2c, 0x32, 0xf9, 0x01, 0xba, 0xa5, 0x02, 0x23, 0xd1, 0x28, 0xb0, 0x3d, 0xfb,
+ 0x73, 0x7b, 0xc6, 0xaf, 0xa1, 0x9b, 0x38, 0x65, 0xb6, 0x70, 0x78, 0xcc, 0x4b, 0x2d, 0x8f, 0x95,
+ 0xbb, 0xba, 0x9b, 0xfc, 0xc9, 0x5f, 0x4d, 0xe8, 0xcc, 0x28, 0xcf, 0x45, 0x4c, 0xf8, 0x1c, 0x02,
+ 0xbd, 0x75, 0xf8, 0x59, 0x59, 0xc3, 0xd1, 0x55, 0x8f, 0x4d, 0x1a, 0x0e, 0xc1, 0xd7, 0xb2, 0x36,
+ 0x89, 0xaf, 0xa5, 0x19, 0x63, 0xa9, 0xe4, 0xa3, 0x31, 0x0c, 0xb2, 0x19, 0x30, 0x78, 0x3c, 0x20,
+ 0x8e, 0xa0, 0x73, 0x2b, 0x63, 0xfb, 0x60, 0xad, 0x0a, 0x59, 0x82, 0x5b, 0xdb, 0xda, 0x4f, 0x6d,
+ 0x7b, 0x0e, 0x1d, 0x4a, 0xb5, 0x5a, 0x51, 0xce, 0x3b, 0xe3, 0xe6, 0x34, 0x3c, 0xde, 0xa9, 0x6d,
+ 0x46, 0x59, 0xca, 0xe5, 0xe0, 0x01, 0xb4, 0x17, 0x32, 0x49, 0x56, 0x9a, 0x77, 0x2b, 0xb5, 0x1c,
+ 0x86, 0xc7, 0xd0, 0xcd, 0x9d, 0x63, 0xbc, 0x67, 0x9d, 0x64, 0x8f, 0x9d, 0x2c, 0x1d, 0x2c, 0xf3,
+ 0x4c, 0x45, 0x45, 0x3f, 0xd3, 0x42, 0x73, 0x18, 0x7b, 0xd3, 0x6e, 0x59, 0xb1, 0xc0, 0xf0, 0x53,
+ 0x80, 0xe2, 0x74, 0xbe, 0x4a, 0x35, 0x0f, 0x2b, 0x3d, 0x2b, 0x38, 0x72, 0xe8, 0x2c, 0x64, 0xaa,
+ 0xe9, 0x9d, 0xe6, 0x7d, 0xfb, 0xb0, 0x65, 0x38, 0xf9, 0x11, 0x7a, 0xe7, 0x42, 0x45, 0xc5, 0xfa,
+ 0x94, 0x0e, 0x7a, 0x4f, 0x1c, 0xe4, 0x10, 0xbc, 0x95, 0x9a, 0xea, 0xfb, 0x6e, 0x90, 0xca, 0xc0,
+ 0xcd, 0xa7, 0x03, 0x4f, 0xbe, 0x81, 0xde, 0x66, 0x5d, 0x71, 0x08, 0xad, 0x54, 0x46, 0x94, 0x73,
+ 0x6f, 0xdc, 0x9c, 0x06, 0xf3, 0x22, 0xc0, 0x7d, 0xe8, 0xde, 0x92, 0x50, 0x29, 0xa9, 0x9c, 0xfb,
+ 0x96, 0xd8, 0xc4, 0x93, 0x3f, 0x3c, 0x00, 0x73, 0xff, 0xf4, 0x46, 0xa4, 0xb1, 0xdd, 0x88, 0x8b,
+ 0xb3, 0x9a, 0x3a, 0xff, 0xe2, 0x0c, 0xbf, 0x70, 0x1f, 0xae, 0x6f, 0xd7, 0xea, 0xe3, 0xea, 0x67,
+ 0x52, 0xdc, 0x7b, 0xf2, 0xf5, 0x1e, 0x40, 0xfb, 0x52, 0x46, 0x74, 0x71, 0x56, 0xd7, 0x5c, 0x60,
+ 0xc6, 0xac, 0x53, 0x67, 0x56, 0xf1, 0xa1, 0x96, 0xe1, 0xe1, 0x97, 0xd0, 0xdb, 0xfc, 0x0e, 0x70,
+ 0x17, 0x42, 0x1b, 0x5c, 0x4a, 0x95, 0x88, 0x5b, 0xd6, 0xc0, 0x67, 0xb0, 0x6b, 0x81, 0x6d, 0x63,
+ 0xe6, 0x1d, 0xfe, 0xed, 0x43, 0x58, 0x59, 0x70, 0x04, 0x68, 0xcf, 0xf2, 0xf8, 0x7c, 0x9d, 0xb1,
+ 0x06, 0x86, 0xd0, 0x99, 0xe5, 0xf1, 0x09, 0x09, 0xcd, 0x3c, 0x17, 0xbc, 0x52, 0x32, 0x63, 0xbe,
+ 0xcb, 0x7a, 0x91, 0x65, 0xac, 0x89, 0x03, 0x80, 0xe2, 0x3c, 0xa7, 0x3c, 0x63, 0x81, 0x4b, 0xfc,
+ 0x5e, 0x6a, 0x62, 0x2d, 0x23, 0xc2, 0x05, 0x96, 0x6d, 0x3b, 0xd6, 0x2c, 0x13, 0xeb, 0x20, 0x83,
+ 0xbe, 0x69, 0x46, 0x42, 0xe9, 0x6b, 0xd3, 0xa5, 0x8b, 0x43, 0x60, 0x55, 0xc4, 0x5e, 0xea, 0x21,
+ 0xc2, 0x60, 0x96, 0xc7, 0x6f, 0x52, 0x45, 0x62, 0x71, 0x23, 0xae, 0x6f, 0x89, 0x01, 0xee, 0xc1,
+ 0x8e, 0x2b, 0x64, 0x1e, 0x6f, 0x9d, 0xb3, 0xd0, 0xa5, 0x9d, 0xde, 0xd0, 0xe2, 0x97, 0xef, 0xd6,
+ 0x52, 0xad, 0x13, 0xd6, 0xc7, 0x8f, 0x60, 0x6f, 0x96, 0xc7, 0x57, 0x4a, 0xa4, 0xf9, 0x92, 0xd4,
+ 0x4b, 0x12, 0x11, 0x29, 0xb6, 0xe3, 0x6e, 0x5f, 0xad, 0x12, 0x92, 0x6b, 0x7d, 0x29, 0x7f, 0x65,
+ 0x03, 0x27, 0x66, 0x4e, 0x22, 0xb2, 0x3f, 0x43, 0xb6, 0xeb, 0xc4, 0x6c, 0x10, 0x2b, 0x86, 0xb9,
+ 0x79, 0x5f, 0x29, 0xb2, 0x23, 0xee, 0xb9, 0xae, 0x2e, 0xb6, 0x39, 0x78, 0x78, 0x07, 0x83, 0xfa,
+ 0xf3, 0x1a, 0x1d, 0x5b, 0xe4, 0x45, 0x14, 0x99, 0xb7, 0x64, 0x0d, 0xe4, 0x30, 0xdc, 0xc2, 0x73,
+ 0x4a, 0xe4, 0x5b, 0xb2, 0x8c, 0x57, 0x67, 0xde, 0x64, 0x91, 0xd0, 0x05, 0xe3, 0xe3, 0x01, 0xf0,
+ 0x5a, 0xa9, 0x97, 0xc5, 0x36, 0x5a, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, 0x8c,
+ 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, 0xa8,
+ 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x86, 0x52, 0x5b, 0xe0, 0x74, 0x06, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto
new file mode 100644
index 0000000..644ce7b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/raftpb/raft.proto
@@ -0,0 +1,95 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+enum EntryType {
+ EntryNormal = 0;
+ EntryConfChange = 1;
+}
+
+message Entry {
+ optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+ optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+ optional EntryType Type = 1 [(gogoproto.nullable) = false];
+ optional bytes Data = 4;
+}
+
+message SnapshotMetadata {
+ optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+ optional uint64 index = 2 [(gogoproto.nullable) = false];
+ optional uint64 term = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+ optional bytes data = 1;
+ optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+enum MessageType {
+ MsgHup = 0;
+ MsgBeat = 1;
+ MsgProp = 2;
+ MsgApp = 3;
+ MsgAppResp = 4;
+ MsgVote = 5;
+ MsgVoteResp = 6;
+ MsgSnap = 7;
+ MsgHeartbeat = 8;
+ MsgHeartbeatResp = 9;
+ MsgUnreachable = 10;
+ MsgSnapStatus = 11;
+ MsgCheckQuorum = 12;
+ MsgTransferLeader = 13;
+ MsgTimeoutNow = 14;
+ MsgReadIndex = 15;
+ MsgReadIndexResp = 16;
+ MsgPreVote = 17;
+ MsgPreVoteResp = 18;
+}
+
+message Message {
+ optional MessageType type = 1 [(gogoproto.nullable) = false];
+ optional uint64 to = 2 [(gogoproto.nullable) = false];
+ optional uint64 from = 3 [(gogoproto.nullable) = false];
+ optional uint64 term = 4 [(gogoproto.nullable) = false];
+ optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
+ optional uint64 index = 6 [(gogoproto.nullable) = false];
+ repeated Entry entries = 7 [(gogoproto.nullable) = false];
+ optional uint64 commit = 8 [(gogoproto.nullable) = false];
+ optional Snapshot snapshot = 9 [(gogoproto.nullable) = false];
+ optional bool reject = 10 [(gogoproto.nullable) = false];
+ optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
+ optional bytes context = 12;
+}
+
+message HardState {
+ optional uint64 term = 1 [(gogoproto.nullable) = false];
+ optional uint64 vote = 2 [(gogoproto.nullable) = false];
+ optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+message ConfState {
+ repeated uint64 nodes = 1;
+ repeated uint64 learners = 2;
+}
+
+enum ConfChangeType {
+ ConfChangeAddNode = 0;
+ ConfChangeRemoveNode = 1;
+ ConfChangeUpdateNode = 2;
+ ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+ optional uint64 ID = 1 [(gogoproto.nullable) = false];
+ optional ConfChangeType Type = 2 [(gogoproto.nullable) = false];
+ optional uint64 NodeID = 3 [(gogoproto.nullable) = false];
+ optional bytes Context = 4;
+}
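
These raftpb definitions are what the generated marshaling code earlier in raft.pb.go implements. As a minimal sketch (not part of this change), a ConfChange can be round-tripped through that generated API:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	// Describe a membership change that adds node 2 to the cluster.
	cc := pb.ConfChange{ID: 1, Type: pb.ConfChangeAddNode, NodeID: 2}

	// Marshal uses the generated gogo marshaler (marshaler_all = true above).
	data, err := cc.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal is the hand-rolled decoder generated in raft.pb.go.
	var decoded pb.ConfChange
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("decoded ConfChange: %+v\n", decoded)
}

ProposeConfChange in rawnode.go below uses the same Marshal call to wrap a ConfChange into an EntryConfChange entry.
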
diff --git a/vendor/go.etcd.io/etcd/raft/rawnode.go b/vendor/go.etcd.io/etcd/raft/rawnode.go
new file mode 100644
index 0000000..d7a272d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/rawnode.go
@@ -0,0 +1,295 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but no peer can be found in raft.prs for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+ raft *raft
+ prevSoftSt *SoftState
+ prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+ return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+func (rn *RawNode) commitReady(rd Ready) {
+ if rd.SoftState != nil {
+ rn.prevSoftSt = rd.SoftState
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ rn.prevHardSt = rd.HardState
+ }
+
+ // If entries were applied (or a snapshot), update our cursor for
+ // the next Ready. Note that if the current HardState contains a
+ // new Commit index, this does not mean that we're also applying
+ // all of the new entries due to commit pagination by size.
+ if index := rd.appliedCursor(); index > 0 {
+ rn.raft.raftLog.appliedTo(index)
+ }
+
+ if len(rd.Entries) > 0 {
+ e := rd.Entries[len(rd.Entries)-1]
+ rn.raft.raftLog.stableTo(e.Index, e.Term)
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+ }
+ if len(rd.ReadStates) != 0 {
+ rn.raft.readStates = nil
+ }
+}
+
+// NewRawNode returns a new RawNode given a configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+ if config.ID == 0 {
+ panic("config.ID must not be zero")
+ }
+ r := newRaft(config)
+ rn := &RawNode{
+ raft: r,
+ }
+ lastIndex, err := config.Storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ // If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+ // restoring an existing RawNode (like RestartNode).
+ // TODO(bdarnell): rethink RawNode initialization and whether the application needs
+ // to be able to tell us when it expects the RawNode to exist.
+ if lastIndex == 0 {
+ r.becomeFollower(1, None)
+ ents := make([]pb.Entry, len(peers))
+ for i, peer := range peers {
+ cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+ data, err := cc.Marshal()
+ if err != nil {
+ panic("unexpected marshal error")
+ }
+
+ ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+ }
+ r.raftLog.append(ents...)
+ r.raftLog.committed = uint64(len(ents))
+ for _, peer := range peers {
+ r.addNode(peer.ID)
+ }
+ }
+
+ // Set the initial hard and soft states after performing all initialization.
+ rn.prevSoftSt = r.softState()
+ if lastIndex == 0 {
+ rn.prevHardSt = emptyState
+ } else {
+ rn.prevHardSt = r.hardState()
+ }
+
+ return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+ rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+func (rn *RawNode) TickQuiesced() {
+ rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgHup,
+ })
+}
+
+// Propose proposes that data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgProp,
+ From: rn.raft.id,
+ Entries: []pb.Entry{
+ {Data: data},
+ }})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+ data, err := cc.Marshal()
+ if err != nil {
+ return err
+ }
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgProp,
+ Entries: []pb.Entry{
+ {Type: pb.EntryConfChange, Data: data},
+ },
+ })
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+ if cc.NodeID == None {
+ return &pb.ConfState{Nodes: rn.raft.nodes(), Learners: rn.raft.learnerNodes()}
+ }
+ switch cc.Type {
+ case pb.ConfChangeAddNode:
+ rn.raft.addNode(cc.NodeID)
+ case pb.ConfChangeAddLearnerNode:
+ rn.raft.addLearner(cc.NodeID)
+ case pb.ConfChangeRemoveNode:
+ rn.raft.removeNode(cc.NodeID)
+ case pb.ConfChangeUpdateNode:
+ default:
+ panic("unexpected conf type")
+ }
+ return &pb.ConfState{Nodes: rn.raft.nodes(), Learners: rn.raft.learnerNodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// ignore unexpected local messages received over the network
+ if IsLocalMsg(m.Type) {
+ return ErrStepLocalMsg
+ }
+ if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+ return rn.raft.Step(m)
+ }
+ return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+ rd := rn.newReady()
+ rn.raft.msgs = nil
+ rn.raft.reduceUncommittedSize(rd.CommittedEntries)
+ return rd
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready is pending.
+// The checking logic in this method should be consistent with Ready.containsUpdates().
+func (rn *RawNode) HasReady() bool {
+ r := rn.raft
+ if !r.softState().equal(rn.prevSoftSt) {
+ return true
+ }
+ if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+ return true
+ }
+ if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+ return true
+ }
+ if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
+ return true
+ }
+ if len(r.readStates) != 0 {
+ return true
+ }
+ return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+func (rn *RawNode) Advance(rd Ready) {
+ rn.commitReady(rd)
+}
+
+// Status returns the current status of the given group.
+func (rn *RawNode) Status() *Status {
+ status := getStatus(rn.raft)
+ return &status
+}
+
+// StatusWithoutProgress returns a Status without populating the Progress field
+// (and returns the Status as a value to avoid forcing it onto the heap). This
+// is more performant if the Progress is not required. See WithProgress for an
+// allocation-free way to introspect the Progress.
+func (rn *RawNode) StatusWithoutProgress() Status {
+ return getStatusWithoutProgress(rn.raft)
+}
+
+// ProgressType indicates the type of replica a Progress corresponds to.
+type ProgressType byte
+
+const (
+ // ProgressTypePeer accompanies a Progress for a regular peer replica.
+ ProgressTypePeer ProgressType = iota
+ // ProgressTypeLearner accompanies a Progress for a learner replica.
+ ProgressTypeLearner
+)
+
+// WithProgress is a helper to introspect the Progress for this node and its
+// peers.
+func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr Progress)) {
+ for id, pr := range rn.raft.prs {
+ pr := *pr
+ pr.ins = nil
+ visitor(id, ProgressTypePeer, pr)
+ }
+ for id, pr := range rn.raft.learnerPrs {
+ pr := *pr
+ pr.ins = nil
+ visitor(id, ProgressTypeLearner, pr)
+ }
+}
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+ rej := status == SnapshotFailure
+
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
+
+// TransferLeader tries to transfer leadership to the given transferee.
+func (rn *RawNode) TransferLeader(transferee uint64) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
+}
+
+// ReadIndex requests a read state. The read state will be set in ready.
+// Read State has a read index. Once the application advances further than the read
+// index, any linearizable read requests issued before the read request can be
+// processed safely. The read state will have the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
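
RawNode is driven by an explicit tick/Ready/Advance loop in the application. The following is a hedged sketch of one loop iteration; it assumes the Config and Peer types and the Ready fields defined elsewhere in the raft package (not shown in this diff), and the persist/send/apply steps are placeholders for application code:

package main

import "go.etcd.io/etcd/raft"

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20,
		MaxInflightMsgs: 256,
	}
	rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
	if err != nil {
		panic(err)
	}

	// One iteration of the driving loop: advance the logical clock, then
	// drain any pending Ready before calling Advance.
	rn.Tick()
	if rn.HasReady() {
		rd := rn.Ready()
		// Persist unstable entries (plus HardState/Snapshot) before sending.
		if err := storage.Append(rd.Entries); err != nil {
			panic(err)
		}
		for range rd.Messages {
			// send the message over the application's transport
		}
		for range rd.CommittedEntries {
			// apply the entry to the application state machine
		}
		rn.Advance(rd)
	}
}
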
diff --git a/vendor/go.etcd.io/etcd/raft/read_only.go b/vendor/go.etcd.io/etcd/raft/read_only.go
new file mode 100644
index 0000000..aecc6b2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/read_only.go
@@ -0,0 +1,118 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "go.etcd.io/etcd/raft/raftpb"
+
+// ReadState provides state for a read-only query.
+// It is the caller's responsibility to call ReadIndex first before getting
+// this state from Ready; it is also the caller's duty to differentiate whether this
+// state is the one it requested through RequestCtx, e.g. by giving a unique id as
+// RequestCtx.
+type ReadState struct {
+ Index uint64
+ RequestCtx []byte
+}
+
+type readIndexStatus struct {
+ req pb.Message
+ index uint64
+ acks map[uint64]struct{}
+}
+
+type readOnly struct {
+ option ReadOnlyOption
+ pendingReadIndex map[string]*readIndexStatus
+ readIndexQueue []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+ return &readOnly{
+ option: option,
+ pendingReadIndex: make(map[string]*readIndexStatus),
+ }
+}
+
+// addRequest adds a read-only request into the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+ ctx := string(m.Entries[0].Data)
+ if _, ok := ro.pendingReadIndex[ctx]; ok {
+ return
+ }
+ ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
+ ro.readIndexQueue = append(ro.readIndexQueue, ctx)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat attached to the read-only request
+// context.
+func (ro *readOnly) recvAck(m pb.Message) int {
+ rs, ok := ro.pendingReadIndex[string(m.Context)]
+ if !ok {
+ return 0
+ }
+
+ rs.acks[m.From] = struct{}{}
+ // add one to include an ack from local node
+ return len(rs.acks) + 1
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+ var (
+ i int
+ found bool
+ )
+
+ ctx := string(m.Context)
+ rss := []*readIndexStatus{}
+
+ for _, okctx := range ro.readIndexQueue {
+ i++
+ rs, ok := ro.pendingReadIndex[okctx]
+ if !ok {
+ panic("cannot find corresponding read state from pending map")
+ }
+ rss = append(rss, rs)
+ if okctx == ctx {
+ found = true
+ break
+ }
+ }
+
+ if found {
+ ro.readIndexQueue = ro.readIndexQueue[i:]
+ for _, rs := range rss {
+ delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
+ }
+ return rss
+ }
+
+ return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read-only
+// request in the readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+ if len(ro.readIndexQueue) == 0 {
+ return ""
+ }
+ return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
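
From the application's side, the ReadState values produced here surface in Ready.ReadStates after a RawNode.ReadIndex call. Below is a hedged sketch of matching them back to pending requests; the waiting map and its channels are hypothetical application bookkeeping, not part of the raft package:

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft"
)

// serveReads matches ReadStates from a Ready against pending read requests.
// The waiting map keys are the unique request contexts previously passed to
// RawNode.ReadIndex; the channels receive the read index once it is known.
func serveReads(readStates []raft.ReadState, waiting map[string]chan uint64) {
	for _, rs := range readStates {
		if ch, ok := waiting[string(rs.RequestCtx)]; ok {
			// The read is safe to serve once the applied index reaches rs.Index.
			ch <- rs.Index
			delete(waiting, string(rs.RequestCtx))
		}
	}
}

func main() {
	ch := make(chan uint64, 1)
	waiting := map[string]chan uint64{"req-1": ch}
	serveReads([]raft.ReadState{{Index: 7, RequestCtx: []byte("req-1")}}, waiting)
	fmt.Println(<-ch) // 7
}
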
diff --git a/vendor/go.etcd.io/etcd/raft/status.go b/vendor/go.etcd.io/etcd/raft/status.go
new file mode 100644
index 0000000..9feca7c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/status.go
@@ -0,0 +1,94 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+type Status struct {
+ ID uint64
+
+ pb.HardState
+ SoftState
+
+ Applied uint64
+ Progress map[uint64]Progress
+
+ LeadTransferee uint64
+}
+
+func getProgressCopy(r *raft) map[uint64]Progress {
+ prs := make(map[uint64]Progress)
+ for id, p := range r.prs {
+ prs[id] = *p
+ }
+
+ for id, p := range r.learnerPrs {
+ prs[id] = *p
+ }
+ return prs
+}
+
+func getStatusWithoutProgress(r *raft) Status {
+ s := Status{
+ ID: r.id,
+ LeadTransferee: r.leadTransferee,
+ }
+ s.HardState = r.hardState()
+ s.SoftState = *r.softState()
+ s.Applied = r.raftLog.applied
+ return s
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+ s := getStatusWithoutProgress(r)
+ if s.RaftState == StateLeader {
+ s.Progress = getProgressCopy(r)
+ }
+ return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+ j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+ s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
+
+ if len(s.Progress) == 0 {
+ j += "},"
+ } else {
+ for k, v := range s.Progress {
+ subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+ j += subj
+ }
+ // remove the trailing ","
+ j = j[:len(j)-1] + "},"
+ }
+
+ j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
+ return []byte(j), nil
+}
+
+func (s Status) String() string {
+ b, err := s.MarshalJSON()
+ if err != nil {
+ raftLogger.Panicf("unexpected error: %v", err)
+ }
+ return string(b)
+}
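
For debugging, Status (or the cheaper StatusWithoutProgress) can be sampled from the driving loop. A small hedged helper sketch, assuming a *raft.RawNode built as in the earlier example; the raftdebug package name is made up for illustration:

package raftdebug

import (
	"fmt"

	"go.etcd.io/etcd/raft"
)

// logStatus prints a one-line summary of the node's current status.
func logStatus(rn *raft.RawNode) {
	st := rn.Status()
	fmt.Printf("id=%x term=%d state=%s lead=%x applied=%d\n",
		st.ID, st.Term, st.RaftState, st.Lead, st.Applied)
	// st.String() returns the same information as JSON via MarshalJSON.
}
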
diff --git a/vendor/go.etcd.io/etcd/raft/storage.go b/vendor/go.etcd.io/etcd/raft/storage.go
new file mode 100644
index 0000000..14ad686
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/storage.go
@@ -0,0 +1,271 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+ "sync"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+// ErrUnavailable is returned by Storage interface when the requested log entries
+// are unavailable.
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
+// snapshot is temporarily unavailable.
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+ // InitialState returns the saved HardState and ConfState information.
+ InitialState() (pb.HardState, pb.ConfState, error)
+ // Entries returns a slice of log entries in the range [lo,hi).
+ // MaxSize limits the total size of the log entries returned, but
+ // Entries returns at least one entry if any.
+ Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+ // Term returns the term of entry i, which must be in the range
+ // [FirstIndex()-1, LastIndex()]. The term of the entry before
+ // FirstIndex is retained for matching purposes even though the
+ // rest of that entry may not be available.
+ Term(i uint64) (uint64, error)
+ // LastIndex returns the index of the last entry in the log.
+ LastIndex() (uint64, error)
+ // FirstIndex returns the index of the first log entry that is
+ // possibly available via Entries (older entries have been incorporated
+ // into the latest Snapshot; if storage only contains the dummy entry the
+ // first log entry is not available).
+ FirstIndex() (uint64, error)
+ // Snapshot returns the most recent snapshot.
+	// If the snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
+	// so the raft state machine knows that Storage needs some time to prepare
+	// the snapshot, and will call Snapshot again later.
+ Snapshot() (pb.Snapshot, error)
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
+type MemoryStorage struct {
+ // Protects access to all fields. Most methods of MemoryStorage are
+ // run on the raft goroutine, but Append() is run on an application
+ // goroutine.
+ sync.Mutex
+
+ hardState pb.HardState
+ snapshot pb.Snapshot
+ // ents[i] has raft log position i+snapshot.Metadata.Index
+ ents []pb.Entry
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+ return &MemoryStorage{
+ // When starting from scratch populate the list with a dummy entry at term zero.
+ ents: make([]pb.Entry, 1),
+ }
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+ return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.hardState = st
+ return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if lo <= offset {
+ return nil, ErrCompacted
+ }
+ if hi > ms.lastIndex()+1 {
+ raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+ }
+ // only contains dummy entries.
+ if len(ms.ents) == 1 {
+ return nil, ErrUnavailable
+ }
+
+ ents := ms.ents[lo-offset : hi-offset]
+ return limitSize(ents, maxSize), nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if i < offset {
+ return 0, ErrCompacted
+ }
+ if int(i-offset) >= len(ms.ents) {
+ return 0, ErrUnavailable
+ }
+ return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+ return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+ return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+ ms.Lock()
+ defer ms.Unlock()
+
+	// check for an old snapshot being applied and reject it
+ msIndex := ms.snapshot.Metadata.Index
+ snapIndex := snap.Metadata.Index
+ if msIndex >= snapIndex {
+ return ErrSnapOutOfDate
+ }
+
+ ms.snapshot = snap
+ ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+ return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ if i <= ms.snapshot.Metadata.Index {
+ return pb.Snapshot{}, ErrSnapOutOfDate
+ }
+
+ offset := ms.ents[0].Index
+ if i > ms.lastIndex() {
+ raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+ }
+
+ ms.snapshot.Metadata.Index = i
+ ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+ if cs != nil {
+ ms.snapshot.Metadata.ConfState = *cs
+ }
+ ms.snapshot.Data = data
+ return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if compactIndex <= offset {
+ return ErrCompacted
+ }
+ if compactIndex > ms.lastIndex() {
+ raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+ }
+
+ i := compactIndex - offset
+ ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
+ ents[0].Index = ms.ents[i].Index
+ ents[0].Term = ms.ents[i].Term
+ ents = append(ents, ms.ents[i+1:]...)
+ ms.ents = ents
+ return nil
+}
+
+// Append the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.entries[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+ if len(entries) == 0 {
+ return nil
+ }
+
+ ms.Lock()
+ defer ms.Unlock()
+
+ first := ms.firstIndex()
+ last := entries[0].Index + uint64(len(entries)) - 1
+
+ // shortcut if there is no new entry.
+ if last < first {
+ return nil
+ }
+ // truncate compacted entries
+ if first > entries[0].Index {
+ entries = entries[first-entries[0].Index:]
+ }
+
+ offset := entries[0].Index - ms.ents[0].Index
+ switch {
+ case uint64(len(ms.ents)) > offset:
+ ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
+ ms.ents = append(ms.ents, entries...)
+ case uint64(len(ms.ents)) == offset:
+ ms.ents = append(ms.ents, entries...)
+ default:
+ raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
+ ms.lastIndex(), entries[0].Index)
+ }
+ return nil
+}
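
MemoryStorage backs the raft log with an in-memory slice. A minimal, self-contained sketch of appending entries, creating a snapshot, and compacting the prefix, using only the API shown above:

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	ms := raft.NewMemoryStorage()

	// NewMemoryStorage creates the dummy entry at index 0, so real entries
	// start at index 1.
	ents := []pb.Entry{
		{Index: 1, Term: 1},
		{Index: 2, Term: 1},
		{Index: 3, Term: 2},
	}
	if err := ms.Append(ents); err != nil {
		panic(err)
	}

	first, _ := ms.FirstIndex() // 1
	last, _ := ms.LastIndex()   // 3
	fmt.Println(first, last)

	// Snapshot the state machine up to index 2, then drop the log prefix.
	if _, err := ms.CreateSnapshot(2, nil, []byte("state")); err != nil {
		panic(err)
	}
	if err := ms.Compact(2); err != nil {
		panic(err)
	}

	// Entries at or before the compaction point are no longer available.
	if _, err := ms.Entries(1, 3, 1<<20); err == raft.ErrCompacted {
		fmt.Println("index 1 was compacted away")
	}
}
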
diff --git a/vendor/go.etcd.io/etcd/raft/util.go b/vendor/go.etcd.io/etcd/raft/util.go
new file mode 100644
index 0000000..c145d26
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/raft/util.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "bytes"
+ "fmt"
+
+ pb "go.etcd.io/etcd/raft/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+// uint64Slice implements sort.Interface
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func min(a, b uint64) uint64 {
+ if a > b {
+ return b
+ }
+ return a
+}
+
+func max(a, b uint64) uint64 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func IsLocalMsg(msgt pb.MessageType) bool {
+ return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
+ msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
+}
+
+func IsResponseMsg(msgt pb.MessageType) bool {
+ return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
+}
+
+// voteRespMsgType maps vote and prevote message types to their corresponding responses.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+ switch msgt {
+ case pb.MsgVote:
+ return pb.MsgVoteResp
+ case pb.MsgPreVote:
+ return pb.MsgPreVoteResp
+ default:
+ panic(fmt.Sprintf("not a vote message: %s", msgt))
+ }
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
+ if m.Reject {
+ fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint)
+ }
+ if m.Commit != 0 {
+ fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+ }
+ if len(m.Entries) > 0 {
+ fmt.Fprintf(&buf, " Entries:[")
+ for i, e := range m.Entries {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(DescribeEntry(e, f))
+ }
+ fmt.Fprintf(&buf, "]")
+ }
+ if !IsEmptySnap(m.Snapshot) {
+ fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
+ }
+ return buf.String()
+}
+
+// PayloadSize is the size of the payload of this Entry. Notably, it does not
+// depend on its Index or Term.
+func PayloadSize(e pb.Entry) int {
+ return len(e.Data)
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+ var formatted string
+ if e.Type == pb.EntryNormal && f != nil {
+ formatted = f(e.Data)
+ } else {
+ formatted = fmt.Sprintf("%q", e.Data)
+ }
+ return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
+}
+
+// DescribeEntries calls DescribeEntry for each Entry, adding a newline to
+// each.
+func DescribeEntries(ents []pb.Entry, f EntryFormatter) string {
+ var buf bytes.Buffer
+ for _, e := range ents {
+ _, _ = buf.WriteString(DescribeEntry(e, f) + "\n")
+ }
+ return buf.String()
+}
+
+func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
+ if len(ents) == 0 {
+ return ents
+ }
+ size := ents[0].Size()
+ var limit int
+ for limit = 1; limit < len(ents); limit++ {
+ size += ents[limit].Size()
+ if uint64(size) > maxSize {
+ break
+ }
+ }
+ return ents[:limit]
+}
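
DescribeMessage and DescribeEntries are debugging helpers. A short hedged sketch with a plain-string EntryFormatter:

package main

import (
	"fmt"

	"go.etcd.io/etcd/raft"
	pb "go.etcd.io/etcd/raft/raftpb"
)

func main() {
	// Render entry payloads as plain strings instead of quoted bytes.
	formatter := raft.EntryFormatter(func(data []byte) string { return string(data) })

	m := pb.Message{
		Type:    pb.MsgApp,
		From:    1,
		To:      2,
		Term:    5,
		LogTerm: 5,
		Index:   10,
		Commit:  9,
		Entries: []pb.Entry{{Term: 5, Index: 11, Type: pb.EntryNormal, Data: []byte("set x=1")}},
	}
	fmt.Println(raft.DescribeMessage(m, formatter))
	fmt.Print(raft.DescribeEntries(m.Entries, formatter))
}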