[VOL-3678] First implementation of the BBSim-sadis-server
Change-Id: I5077a8f861f4cc6af9759f31a4a415042c05eba3
diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS
new file mode 100644
index 0000000..3cf0364
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/OWNERS
@@ -0,0 +1,9 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- sig-auth-certificates-approvers
+reviewers:
+- sig-auth-certificates-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
new file mode 100644
index 0000000..3da1441
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -0,0 +1,206 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+ "bytes"
+ "crypto"
+ cryptorand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "k8s.io/client-go/util/keyutil"
+)
+
+const duration365d = time.Hour * 24 * 365
+
+// Config contains the basic fields required for creating a certificate
+type Config struct {
+ CommonName string
+ Organization []string
+ AltNames AltNames
+ Usages []x509.ExtKeyUsage
+}
+
+// AltNames contains the domain names and IP addresses that will be added
+// to the API Server's x509 certificate SubAltNames field. The values will
+// be passed directly to the x509.Certificate object.
+type AltNames struct {
+ DNSNames []string
+ IPs []net.IP
+}
+
+// NewSelfSignedCACert creates a CA certificate
+func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
+ now := time.Now()
+ tmpl := x509.Certificate{
+ SerialNumber: new(big.Int).SetInt64(0),
+ Subject: pkix.Name{
+ CommonName: cfg.CommonName,
+ Organization: cfg.Organization,
+ },
+ NotBefore: now.UTC(),
+ NotAfter: now.Add(duration365d * 10).UTC(),
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
+ if err != nil {
+ return nil, err
+ }
+ return x509.ParseCertificate(certDERBytes)
+}
+
+// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name
+// You may also specify additional subject alt names (either ip or dns names) for the certificate.
+func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
+ return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
+}
+
+// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
+// for the certificate.
+//
+// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
+// Certs/keys not existing in that directory are created.
+func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
+ validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
+ maxAge := time.Hour * 24 * 365 // one year self-signed certs
+
+ baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
+ certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
+ keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
+ if len(fixtureDirectory) > 0 {
+ cert, err := ioutil.ReadFile(certFixturePath)
+ if err == nil {
+ key, err := ioutil.ReadFile(keyFixturePath)
+ if err == nil {
+ return cert, key, nil
+ }
+ return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
+ }
+ maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
+ }
+
+ caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ caTemplate := x509.Certificate{
+ SerialNumber: big.NewInt(1),
+ Subject: pkix.Name{
+ CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
+ },
+ NotBefore: validFrom,
+ NotAfter: validFrom.Add(maxAge),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ caCertificate, err := x509.ParseCertificate(caDERBytes)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ template := x509.Certificate{
+ SerialNumber: big.NewInt(2),
+ Subject: pkix.Name{
+ CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
+ },
+ NotBefore: validFrom,
+ NotAfter: validFrom.Add(maxAge),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ BasicConstraintsValid: true,
+ }
+
+ if ip := net.ParseIP(host); ip != nil {
+ template.IPAddresses = append(template.IPAddresses, ip)
+ } else {
+ template.DNSNames = append(template.DNSNames, host)
+ }
+
+ template.IPAddresses = append(template.IPAddresses, alternateIPs...)
+ template.DNSNames = append(template.DNSNames, alternateDNS...)
+
+ derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Generate cert, followed by ca
+ certBuffer := bytes.Buffer{}
+ if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
+ return nil, nil, err
+ }
+ if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil {
+ return nil, nil, err
+ }
+
+ // Generate key
+ keyBuffer := bytes.Buffer{}
+ if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+ return nil, nil, err
+ }
+
+ if len(fixtureDirectory) > 0 {
+ if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
+ return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
+ }
+ if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil {
+		return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", keyFixturePath, err)
+ }
+ }
+
+ return certBuffer.Bytes(), keyBuffer.Bytes(), nil
+}
+
+func ipsToStrings(ips []net.IP) []string {
+ ss := make([]string, 0, len(ips))
+ for _, ip := range ips {
+ ss = append(ss, ip.String())
+ }
+ return ss
+}
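
For orientation, a minimal sketch of how a consumer typically uses the self-signed helper above; the "localhost" host and the tls.X509KeyPair wiring are illustrative assumptions, not something this change configures.

package main

import (
	"crypto/tls"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	// Generate a throwaway serving certificate for "localhost" with no extra SANs.
	certPEM, keyPEM, err := cert.GenerateSelfSignedCertKey("localhost", nil, nil)
	if err != nil {
		log.Fatalf("generating self-signed cert: %v", err)
	}

	// Pair the PEM blocks into a tls.Certificate usable by an HTTPS server.
	pair, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		log.Fatalf("loading key pair: %v", err)
	}
	_ = pair // hand this to tls.Config{Certificates: []tls.Certificate{pair}}
}

The returned certificate bundle already contains the leaf followed by the CA, so it can be handed to a TLS server as-is.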
diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go
new file mode 100644
index 0000000..39a6751
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/csr.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+ cryptorand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "net"
+)
+
+// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
+// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
+func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
+ template := &x509.CertificateRequest{
+ Subject: *subject,
+ DNSNames: dnsSANs,
+ IPAddresses: ipSANs,
+ }
+
+ return MakeCSRFromTemplate(privateKey, template)
+}
+
+// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
+// key and certificate request as a template. All key types that are
+// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
+// and *ecdsa.PrivateKey.)
+func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
+ t := *template
+ t.SignatureAlgorithm = sigType(privateKey)
+
+ csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ csrPemBlock := &pem.Block{
+ Type: CertificateRequestBlockType,
+ Bytes: csrDER,
+ }
+
+ return pem.EncodeToMemory(csrPemBlock), nil
+}
+
+func sigType(privateKey interface{}) x509.SignatureAlgorithm {
+ // Customize the signature for RSA keys, depending on the key size
+ if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
+ keySize := privateKey.N.BitLen()
+ switch {
+ case keySize >= 4096:
+ return x509.SHA512WithRSA
+ case keySize >= 3072:
+ return x509.SHA384WithRSA
+ default:
+ return x509.SHA256WithRSA
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+}
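
A hedged example of building a CSR with MakeCSR; the node-style subject, the SANs, and the choice of an ECDSA key are assumptions for illustration only.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509/pkix"
	"fmt"
	"log"
	"net"

	"k8s.io/client-go/util/cert"
)

func main() {
	// An ECDSA key is one of the crypto.Signer implementations MakeCSR accepts.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// Request a certificate for an assumed node identity with one DNS and one IP SAN.
	csrPEM, err := cert.MakeCSR(key,
		&pkix.Name{CommonName: "system:node:worker-0", Organization: []string{"system:nodes"}},
		[]string{"worker-0.example.com"},
		[]net.IP{net.ParseIP("10.0.0.7")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", csrPEM)
}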
diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go
new file mode 100644
index 0000000..35fde68
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/io.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// CanReadCertAndKey returns true if both the certificate and key files exist and are
+// readable, and false if neither exists. If only one of the two can be read, an error is returned.
+func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
+ certReadable := canReadFile(certPath)
+ keyReadable := canReadFile(keyPath)
+
+ if certReadable == false && keyReadable == false {
+ return false, nil
+ }
+
+ if certReadable == false {
+ return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
+ }
+
+ if keyReadable == false {
+ return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
+ }
+
+ return true, nil
+}
+
+// canReadFile returns true if the file at path exists and is
+// readable; otherwise it returns false.
+func canReadFile(path string) bool {
+ f, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+
+ defer f.Close()
+
+ return true
+}
+
+// WriteCert writes the pem-encoded certificate data to certPath.
+// The certificate file will be created with file mode 0644.
+// If the certificate file already exists, it will be overwritten.
+// The parent directory of the certPath will be created as needed with file mode 0755.
+func WriteCert(certPath string, data []byte) error {
+ if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
+ return err
+ }
+ return ioutil.WriteFile(certPath, data, os.FileMode(0644))
+}
+
+// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func NewPool(filename string) (*x509.CertPool, error) {
+ pemBlock, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ pool, err := NewPoolFromBytes(pemBlock)
+ if err != nil {
+ return nil, fmt.Errorf("error creating pool from %s: %s", filename, err)
+ }
+ return pool, nil
+}
+
+// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes.
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) {
+ certs, err := ParseCertsPEM(pemBlock)
+ if err != nil {
+ return nil, err
+ }
+ pool := x509.NewCertPool()
+ for _, cert := range certs {
+ pool.AddCert(cert)
+ }
+ return pool, nil
+}
+
+// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func CertsFromFile(file string) ([]*x509.Certificate, error) {
+ pemBlock, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ certs, err := ParseCertsPEM(pemBlock)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %s", file, err)
+ }
+ return certs, nil
+}
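
A small sketch showing how the file helpers above might be combined; the paths under /etc/bbsim are hypothetical placeholders, not paths this change introduces.

package main

import (
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	// Only proceed if both halves of the assumed cert/key pair are readable.
	ok, err := cert.CanReadCertAndKey("/etc/bbsim/tls.crt", "/etc/bbsim/tls.key")
	if err != nil {
		log.Fatalf("cert/key pair is incomplete: %v", err)
	}
	if !ok {
		log.Println("no existing pair found; a new one would be generated here")
		return
	}

	// Build an x509.CertPool from a PEM bundle, e.g. to verify peers.
	pool, err := cert.NewPool("/etc/bbsim/ca.crt")
	if err != nil {
		log.Fatalf("loading CA pool: %v", err)
	}
	log.Printf("loaded %d CA subjects", len(pool.Subjects()))
}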
diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go
new file mode 100644
index 0000000..c775123
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/pem.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+ "bytes"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+const (
+ // CertificateBlockType is a possible value for pem.Block.Type.
+ CertificateBlockType = "CERTIFICATE"
+ // CertificateRequestBlockType is a possible value for pem.Block.Type.
+ CertificateRequestBlockType = "CERTIFICATE REQUEST"
+)
+
+// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+ ok := false
+ certs := []*x509.Certificate{}
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ break
+ }
+ // Only use PEM "CERTIFICATE" blocks without extra headers
+ if block.Type != CertificateBlockType || len(block.Headers) != 0 {
+ continue
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return certs, err
+ }
+
+ certs = append(certs, cert)
+ ok = true
+ }
+
+ if !ok {
+ return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
+ }
+ return certs, nil
+}
+
+// EncodeCertificates returns the PEM-encoded byte array that represents the specified certs.
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
+ b := bytes.Buffer{}
+ for _, cert := range certs {
+ if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil {
+ return []byte{}, err
+ }
+ }
+ return b.Bytes(), nil
+}
diff --git a/vendor/k8s.io/client-go/util/cert/server_inspection.go b/vendor/k8s.io/client-go/util/cert/server_inspection.go
new file mode 100644
index 0000000..f1ef292
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/cert/server_inspection.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
+// state of particular servers. apiHost is "host:port"
+func GetClientCANames(apiHost string) ([]string, error) {
+ // when we run this the second time, we know which one we are expecting
+ acceptableCAs := []string{}
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
+ GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ acceptableCAs = []string{}
+ for _, curr := range hello.AcceptableCAs {
+ acceptableCAs = append(acceptableCAs, string(curr))
+ }
+ return &tls.Certificate{}, nil
+ },
+ }
+
+ conn, err := tls.Dial("tcp", apiHost, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ if err := conn.Close(); err != nil {
+ return nil, err
+ }
+
+ return acceptableCAs, nil
+}
+
+// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
+func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
+ apiserverURL, err := url.Parse(kubeConfigURL)
+ if err != nil {
+ return nil, err
+ }
+ return GetClientCANames(apiserverURL.Host)
+}
+
+// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
+// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port"
+func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: true, // this is insecure so that we always get connected
+ }
+ // if a name is specified for SNI, set it.
+ if len(serverName) > 0 {
+ tlsConfig.ServerName = serverName
+ }
+
+ conn, err := tls.Dial("tcp", apiHost, tlsConfig)
+ if err != nil {
+ return nil, nil, err
+ }
+ if err = conn.Close(); err != nil {
+ return nil, nil, fmt.Errorf("failed to close connection : %v", err)
+ }
+
+ peerCerts := conn.ConnectionState().PeerCertificates
+ peerCertBytes := [][]byte{}
+ for _, a := range peerCerts {
+ actualCert, err := EncodeCertificates(a)
+ if err != nil {
+ return nil, nil, err
+ }
+ peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
+ }
+
+ return peerCerts, peerCertBytes, err
+}
+
+// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
+func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
+ apiserverURL, err := url.Parse(kubeConfigURL)
+ if err != nil {
+ return nil, nil, err
+ }
+ return GetServingCertificates(apiserverURL.Host, serverName)
+}
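
A brief, assumed usage of the inspection helpers, dumping the serving chain of an API server given a kubeconfig-style URL; the address is a placeholder.

package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	// Assumed API server URL, as it would appear in a kubeconfig; no SNI override.
	certs, _, err := cert.GetServingCertificatesForURL("https://10.0.0.1:6443", "")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range certs {
		fmt.Printf("serving cert: subject=%q notAfter=%s\n", c.Subject.CommonName, c.NotAfter)
	}
}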
diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
new file mode 100644
index 0000000..f98faee
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package connrotation implements a connection dialer that tracks and can close
+// all created connections.
+//
+// This is used for credential rotation of long-lived connections, when there's
+// no way to re-authenticate on a live connection.
+package connrotation
+
+import (
+ "context"
+ "net"
+ "sync"
+)
+
+// DialFunc is a shorthand for signature of net.DialContext.
+type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// Dialer opens connections through Dial and tracks them.
+type Dialer struct {
+ dial DialFunc
+
+ mu sync.Mutex
+ conns map[*closableConn]struct{}
+}
+
+// NewDialer creates a new Dialer instance.
+//
+// If dial is not nil, it will be used to create new underlying connections.
+// Otherwise net.DialContext is used.
+func NewDialer(dial DialFunc) *Dialer {
+ return &Dialer{
+ dial: dial,
+ conns: make(map[*closableConn]struct{}),
+ }
+}
+
+// CloseAll forcibly closes all tracked connections.
+//
+// Note: new connections may get created before CloseAll returns.
+func (d *Dialer) CloseAll() {
+ d.mu.Lock()
+ conns := d.conns
+ d.conns = make(map[*closableConn]struct{})
+ d.mu.Unlock()
+
+ for conn := range conns {
+ conn.Close()
+ }
+}
+
+// Dial creates a new tracked connection.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+ return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext creates a new tracked connection.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ conn, err := d.dial(ctx, network, address)
+ if err != nil {
+ return nil, err
+ }
+
+ closable := &closableConn{Conn: conn}
+
+ // When the connection is closed, remove it from the map. This will
+ // be no-op if the connection isn't in the map, e.g. if CloseAll()
+ // is called.
+ closable.onClose = func() {
+ d.mu.Lock()
+ delete(d.conns, closable)
+ d.mu.Unlock()
+ }
+
+ // Start tracking the connection
+ d.mu.Lock()
+ d.conns[closable] = struct{}{}
+ d.mu.Unlock()
+
+ return closable, nil
+}
+
+type closableConn struct {
+ onClose func()
+ net.Conn
+}
+
+func (c *closableConn) Close() error {
+ go c.onClose()
+ return c.Conn.Close()
+}
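
A sketch of the credential-rotation pattern this package exists for, assuming an HTTP client whose transport dials through the tracked dialer; the timeout value is illustrative.

package main

import (
	"net"
	"net/http"
	"time"

	"k8s.io/client-go/util/connrotation"
)

func main() {
	// Wrap the standard dialer so every connection the client opens is tracked.
	d := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second}).DialContext)
	client := &http.Client{Transport: &http.Transport{DialContext: d.DialContext}}
	_ = client

	// After rotating credentials, drop the long-lived connections so the next
	// request re-dials and re-authenticates with the new credentials.
	d.CloseAll()
}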
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
new file mode 100644
index 0000000..c48ba03
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+ "sync"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/clock"
+ "k8s.io/utils/integer"
+)
+
+type backoffEntry struct {
+ backoff time.Duration
+ lastUpdate time.Time
+}
+
+type Backoff struct {
+ sync.RWMutex
+ Clock clock.Clock
+ defaultDuration time.Duration
+ maxDuration time.Duration
+ perItemBackoff map[string]*backoffEntry
+}
+
+func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff {
+ return &Backoff{
+ perItemBackoff: map[string]*backoffEntry{},
+ Clock: tc,
+ defaultDuration: initial,
+ maxDuration: max,
+ }
+}
+
+func NewBackOff(initial, max time.Duration) *Backoff {
+ return &Backoff{
+ perItemBackoff: map[string]*backoffEntry{},
+ Clock: clock.RealClock{},
+ defaultDuration: initial,
+ maxDuration: max,
+ }
+}
+
+// Get the current backoff Duration
+func (p *Backoff) Get(id string) time.Duration {
+ p.RLock()
+ defer p.RUnlock()
+ var delay time.Duration
+ entry, ok := p.perItemBackoff[id]
+ if ok {
+ delay = entry.backoff
+ }
+ return delay
+}
+
+// move backoff to the next mark, capping at maxDuration
+func (p *Backoff) Next(id string, eventTime time.Time) {
+ p.Lock()
+ defer p.Unlock()
+ entry, ok := p.perItemBackoff[id]
+ if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ entry = p.initEntryUnsafe(id)
+ } else {
+ delay := entry.backoff * 2 // exponential
+ entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
+ }
+ entry.lastUpdate = p.Clock.Now()
+}
+
+// Reset forces clearing of all backoff data for a given key.
+func (p *Backoff) Reset(id string) {
+ p.Lock()
+ defer p.Unlock()
+ delete(p.perItemBackoff, id)
+}
+
+// Returns True if the elapsed time since eventTime is smaller than the current backoff window
+func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
+ p.RLock()
+ defer p.RUnlock()
+ entry, ok := p.perItemBackoff[id]
+ if !ok {
+ return false
+ }
+ if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ return false
+ }
+ return p.Clock.Since(eventTime) < entry.backoff
+}
+
+// Returns true if the time since lastUpdate is less than the current backoff window.
+func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
+ p.RLock()
+ defer p.RUnlock()
+ entry, ok := p.perItemBackoff[id]
+ if !ok {
+ return false
+ }
+ if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+ return false
+ }
+ return eventTime.Sub(entry.lastUpdate) < entry.backoff
+}
+
+// Garbage collect records that have aged past maxDuration. Backoff users are expected
+// to invoke this periodically.
+func (p *Backoff) GC() {
+ p.Lock()
+ defer p.Unlock()
+ now := p.Clock.Now()
+ for id, entry := range p.perItemBackoff {
+ if now.Sub(entry.lastUpdate) > p.maxDuration*2 {
+ // GC when entry has not been updated for 2*maxDuration
+ delete(p.perItemBackoff, id)
+ }
+ }
+}
+
+func (p *Backoff) DeleteEntry(id string) {
+ p.Lock()
+ defer p.Unlock()
+ delete(p.perItemBackoff, id)
+}
+
+// Take a lock on *Backoff, before calling initEntryUnsafe
+func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
+ entry := &backoffEntry{backoff: p.defaultDuration}
+ p.perItemBackoff[id] = entry
+ return entry
+}
+
+// After 2*maxDuration we restart the backoff factor to the beginning
+func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+ return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
+}
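
A hedged illustration of the per-item backoff; the key name and durations are arbitrary, chosen only to show the Get/Next/IsInBackOffSince/Reset cycle.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 1s initial delay per key, capped at 30s; both values are illustrative.
	b := flowcontrol.NewBackOff(1*time.Second, 30*time.Second)

	id := "onu-1" // per-item key, e.g. a device being retried (placeholder)
	for i := 0; i < 3; i++ {
		now := time.Now()
		if b.IsInBackOffSince(id, now) {
			fmt.Println("still backing off, skip this attempt")
		}
		b.Next(id, now) // record a failure: delay grows 1s -> 2s -> 4s
		fmt.Println("next delay:", b.Get(id))
	}
	b.Reset(id) // clear the entry once the item succeeds
}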
diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
new file mode 100644
index 0000000..ffd912c
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
@@ -0,0 +1,159 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "golang.org/x/time/rate"
+)
+
+type RateLimiter interface {
+ // TryAccept returns true if a token is taken immediately. Otherwise,
+ // it returns false.
+ TryAccept() bool
+ // Accept returns once a token becomes available.
+ Accept()
+	// Stop stops the rate limiter; subsequent calls to TryAccept will return false
+ Stop()
+ // QPS returns QPS of this rate limiter
+ QPS() float32
+ // Wait returns nil if a token is taken before the Context is done.
+ Wait(ctx context.Context) error
+}
+
+type tokenBucketRateLimiter struct {
+ limiter *rate.Limiter
+ clock Clock
+ qps float32
+}
+
+// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
+// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
+// smoothed qps rate of 'qps'.
+// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
+// The maximum number of tokens in the bucket is capped at 'burst'.
+func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
+ limiter := rate.NewLimiter(rate.Limit(qps), burst)
+ return newTokenBucketRateLimiter(limiter, realClock{}, qps)
+}
+
+// An injectable, mockable clock interface.
+type Clock interface {
+ Now() time.Time
+ Sleep(time.Duration)
+}
+
+type realClock struct{}
+
+func (realClock) Now() time.Time {
+ return time.Now()
+}
+func (realClock) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
+// but allows an injectable clock, for testing.
+func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
+ limiter := rate.NewLimiter(rate.Limit(qps), burst)
+ return newTokenBucketRateLimiter(limiter, c, qps)
+}
+
+func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter {
+ return &tokenBucketRateLimiter{
+ limiter: limiter,
+ clock: c,
+ qps: qps,
+ }
+}
+
+func (t *tokenBucketRateLimiter) TryAccept() bool {
+ return t.limiter.AllowN(t.clock.Now(), 1)
+}
+
+// Accept will block until a token becomes available
+func (t *tokenBucketRateLimiter) Accept() {
+ now := t.clock.Now()
+ t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now))
+}
+
+func (t *tokenBucketRateLimiter) Stop() {
+}
+
+func (t *tokenBucketRateLimiter) QPS() float32 {
+ return t.qps
+}
+
+func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error {
+ return t.limiter.Wait(ctx)
+}
+
+type fakeAlwaysRateLimiter struct{}
+
+func NewFakeAlwaysRateLimiter() RateLimiter {
+ return &fakeAlwaysRateLimiter{}
+}
+
+func (t *fakeAlwaysRateLimiter) TryAccept() bool {
+ return true
+}
+
+func (t *fakeAlwaysRateLimiter) Stop() {}
+
+func (t *fakeAlwaysRateLimiter) Accept() {}
+
+func (t *fakeAlwaysRateLimiter) QPS() float32 {
+ return 1
+}
+
+func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
+ return nil
+}
+
+type fakeNeverRateLimiter struct {
+ wg sync.WaitGroup
+}
+
+func NewFakeNeverRateLimiter() RateLimiter {
+ rl := fakeNeverRateLimiter{}
+ rl.wg.Add(1)
+ return &rl
+}
+
+func (t *fakeNeverRateLimiter) TryAccept() bool {
+ return false
+}
+
+func (t *fakeNeverRateLimiter) Stop() {
+ t.wg.Done()
+}
+
+func (t *fakeNeverRateLimiter) Accept() {
+ t.wg.Wait()
+}
+
+func (t *fakeNeverRateLimiter) QPS() float32 {
+ return 1
+}
+
+func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
+	return errors.New("cannot accept")
+}
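
A short sketch of the token bucket limiter in both blocking and context-aware modes; the qps and burst values are illustrative.

package main

import (
	"context"
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Allow a sustained 5 requests/second with bursts of up to 10 (assumed values).
	rl := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	for i := 0; i < 20; i++ {
		rl.Accept() // blocks until a token is available
		fmt.Println("request", i)
	}

	// Wait is the context-aware variant, useful when the caller may be cancelled.
	if err := rl.Wait(context.Background()); err != nil {
		fmt.Println("rate limiter:", err)
	}
}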
diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go
new file mode 100644
index 0000000..3fdbeb8
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/homedir/homedir.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package homedir
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// HomeDir returns the home directory for the current user.
+// On Windows:
+// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
+// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
+// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
+// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
+func HomeDir() string {
+ if runtime.GOOS == "windows" {
+ home := os.Getenv("HOME")
+ homeDriveHomePath := ""
+ if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
+ homeDriveHomePath = homeDrive + homePath
+ }
+ userProfile := os.Getenv("USERPROFILE")
+
+ // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
+ // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
+ for _, p := range []string{home, homeDriveHomePath, userProfile} {
+ if len(p) == 0 {
+ continue
+ }
+ if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
+ continue
+ }
+ return p
+ }
+
+ firstSetPath := ""
+ firstExistingPath := ""
+
+ // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
+ for _, p := range []string{home, userProfile, homeDriveHomePath} {
+ if len(p) == 0 {
+ continue
+ }
+ if len(firstSetPath) == 0 {
+ // remember the first path that is set
+ firstSetPath = p
+ }
+ info, err := os.Stat(p)
+ if err != nil {
+ continue
+ }
+ if len(firstExistingPath) == 0 {
+ // remember the first path that exists
+ firstExistingPath = p
+ }
+ if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
+ // return first path that is writeable
+ return p
+ }
+ }
+
+ // If none are writeable, return first location that exists
+ if len(firstExistingPath) > 0 {
+ return firstExistingPath
+ }
+
+ // If none exist, return first location that is set
+ if len(firstSetPath) > 0 {
+ return firstSetPath
+ }
+
+ // We've got nothing
+ return ""
+ }
+ return os.Getenv("HOME")
+}
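
A one-line sketch of the usual reason this package is vendored: resolving the default kubeconfig path portably across Linux and Windows.

package main

import (
	"fmt"
	"path/filepath"

	"k8s.io/client-go/util/homedir"
)

func main() {
	// The conventional kubeconfig location, built from the resolved home directory.
	kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
	fmt.Println("using kubeconfig:", kubeconfig)
}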
diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS
new file mode 100644
index 0000000..470b7a1
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS
@@ -0,0 +1,7 @@
+approvers:
+- sig-auth-certificates-approvers
+reviewers:
+- sig-auth-certificates-reviewers
+labels:
+- sig/auth
+
diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go
new file mode 100644
index 0000000..83c2c62
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/keyutil/key.go
@@ -0,0 +1,323 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package keyutil contains utilities for managing public/private key pairs.
+package keyutil
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ cryptorand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+const (
+ // ECPrivateKeyBlockType is a possible value for pem.Block.Type.
+ ECPrivateKeyBlockType = "EC PRIVATE KEY"
+ // RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
+ RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
+ // PrivateKeyBlockType is a possible value for pem.Block.Type.
+ PrivateKeyBlockType = "PRIVATE KEY"
+ // PublicKeyBlockType is a possible value for pem.Block.Type.
+ PublicKeyBlockType = "PUBLIC KEY"
+)
+
+// MakeEllipticPrivateKeyPEM creates an ECDSA private key
+func MakeEllipticPrivateKeyPEM() ([]byte, error) {
+ privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
+ if err != nil {
+ return nil, err
+ }
+
+ derBytes, err := x509.MarshalECPrivateKey(privateKey)
+ if err != nil {
+ return nil, err
+ }
+
+ privateKeyPemBlock := &pem.Block{
+ Type: ECPrivateKeyBlockType,
+ Bytes: derBytes,
+ }
+ return pem.EncodeToMemory(privateKeyPemBlock), nil
+}
+
+// WriteKey writes the pem-encoded key data to keyPath.
+// The key file will be created with file mode 0600.
+// If the key file already exists, it will be overwritten.
+// The parent directory of the keyPath will be created as needed with file mode 0755.
+func WriteKey(keyPath string, data []byte) error {
+ if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
+ return err
+ }
+ return ioutil.WriteFile(keyPath, data, os.FileMode(0600))
+}
+
+// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
+// can't find one, it will generate a new key and store it there.
+func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
+ loadedData, err := ioutil.ReadFile(keyPath)
+ // Call verifyKeyData to ensure the file wasn't empty/corrupt.
+ if err == nil && verifyKeyData(loadedData) {
+ return loadedData, false, err
+ }
+ if !os.IsNotExist(err) {
+ return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
+ }
+
+ generatedData, err := MakeEllipticPrivateKeyPEM()
+ if err != nil {
+ return nil, false, fmt.Errorf("error generating key: %v", err)
+ }
+ if err := WriteKey(keyPath, generatedData); err != nil {
+ return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
+ }
+ return generatedData, true, nil
+}
+
+// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
+// a PEM encoded block or returns an error.
+func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
+ switch t := privateKey.(type) {
+ case *ecdsa.PrivateKey:
+ derBytes, err := x509.MarshalECPrivateKey(t)
+ if err != nil {
+ return nil, err
+ }
+ block := &pem.Block{
+ Type: ECPrivateKeyBlockType,
+ Bytes: derBytes,
+ }
+ return pem.EncodeToMemory(block), nil
+ case *rsa.PrivateKey:
+ block := &pem.Block{
+ Type: RSAPrivateKeyBlockType,
+ Bytes: x509.MarshalPKCS1PrivateKey(t),
+ }
+ return pem.EncodeToMemory(block), nil
+ default:
+ return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
+ }
+}
+
+// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
+// Returns an error if the file could not be read or if the private key could not be parsed.
+func PrivateKeyFromFile(file string) (interface{}, error) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ key, err := ParsePrivateKeyPEM(data)
+ if err != nil {
+ return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
+ }
+ return key, nil
+}
+
+// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
+// Reads public keys from both public and private key files.
+func PublicKeysFromFile(file string) ([]interface{}, error) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+ keys, err := ParsePublicKeysPEM(data)
+ if err != nil {
+ return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
+ }
+ return keys, nil
+}
+
+// verifyKeyData returns true if the provided data appears to be a valid private key.
+func verifyKeyData(data []byte) bool {
+ if len(data) == 0 {
+ return false
+ }
+ _, err := ParsePrivateKeyPEM(data)
+ return err == nil
+}
+
+// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
+func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
+ var privateKeyPemBlock *pem.Block
+ for {
+ privateKeyPemBlock, keyData = pem.Decode(keyData)
+ if privateKeyPemBlock == nil {
+ break
+ }
+
+ switch privateKeyPemBlock.Type {
+ case ECPrivateKeyBlockType:
+ // ECDSA Private Key in ASN.1 format
+ if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
+ return key, nil
+ }
+ case RSAPrivateKeyBlockType:
+ // RSA Private Key in PKCS#1 format
+ if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+ return key, nil
+ }
+ case PrivateKeyBlockType:
+ // RSA or ECDSA Private Key in unencrypted PKCS#8 format
+ if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+ return key, nil
+ }
+ }
+
+ // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
+ // originally, only the first PEM block was parsed and expected to be a key block
+ }
+
+ // we read all the PEM blocks and didn't recognize one
+ return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
+}
+
+// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
+// Reads public keys from both public and private key files.
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
+ var block *pem.Block
+ keys := []interface{}{}
+ for {
+ // read the next block
+ block, keyData = pem.Decode(keyData)
+ if block == nil {
+ break
+ }
+
+ // test block against parsing functions
+ if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
+ keys = append(keys, &privateKey.PublicKey)
+ continue
+ }
+ if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
+ keys = append(keys, publicKey)
+ continue
+ }
+ if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
+ keys = append(keys, &privateKey.PublicKey)
+ continue
+ }
+ if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
+ keys = append(keys, publicKey)
+ continue
+ }
+
+ // tolerate non-key PEM blocks for backwards compatibility
+ // originally, only the first PEM block was parsed and expected to be a key block
+ }
+
+ if len(keys) == 0 {
+ return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
+ }
+ return keys, nil
+}
+
+// parseRSAPublicKey parses a single RSA public key from the provided data
+func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+ if cert, err := x509.ParseCertificate(data); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ // Test if parsed key is an RSA Public Key
+ var pubKey *rsa.PublicKey
+ var ok bool
+ if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid RSA Public Key")
+ }
+
+ return pubKey, nil
+}
+
+// parseRSAPrivateKey parses a single RSA private key from the provided data
+func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
+ return nil, err
+ }
+ }
+
+ // Test if parsed key is an RSA Private Key
+ var privKey *rsa.PrivateKey
+ var ok bool
+ if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid RSA Private Key")
+ }
+
+ return privKey, nil
+}
+
+// parseECPublicKey parses a single ECDSA public key from the provided data
+func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+ if cert, err := x509.ParseCertificate(data); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ // Test if parsed key is an ECDSA Public Key
+ var pubKey *ecdsa.PublicKey
+ var ok bool
+ if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key")
+ }
+
+ return pubKey, nil
+}
+
+// parseECPrivateKey parses a single ECDSA private key from the provided data
+func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
+ return nil, err
+ }
+
+ // Test if parsed key is an ECDSA Private Key
+ var privKey *ecdsa.PrivateKey
+ var ok bool
+ if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key")
+ }
+
+ return privKey, nil
+}
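
An assumed usage of the key helpers: load a key from disk or generate one if missing, then parse it back into a typed key; the path is a placeholder.

package main

import (
	"log"

	"k8s.io/client-go/util/keyutil"
)

func main() {
	// Assumed path; reuse the key if it parses, otherwise write a fresh ECDSA key there.
	pemKey, generated, err := keyutil.LoadOrGenerateKeyFile("/var/lib/bbsim/serving.key")
	if err != nil {
		log.Fatal(err)
	}
	if generated {
		log.Println("generated a new private key")
	}

	// Parse it back into an *ecdsa.PrivateKey or *rsa.PrivateKey for signing or CSR building.
	key, err := keyutil.ParsePrivateKeyPEM(pemKey)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded key of type %T", key)
}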
diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
new file mode 100644
index 0000000..71bb632
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+ "math"
+ "sync"
+ "time"
+
+ "golang.org/x/time/rate"
+)
+
+type RateLimiter interface {
+ // When gets an item and gets to decide how long that item should wait
+ When(item interface{}) time.Duration
+	// Forget indicates that an item is finished being retried. Whether the item failed
+	// permanently or succeeded, we'll stop tracking it.
+ Forget(item interface{})
+ // NumRequeues returns back how many failures the item has had
+ NumRequeues(item interface{}) int
+}
+
+// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
+// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
+func DefaultControllerRateLimiter() RateLimiter {
+ return NewMaxOfRateLimiter(
+ NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
+ // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
+ &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
+ )
+}
+
+// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
+type BucketRateLimiter struct {
+ *rate.Limiter
+}
+
+var _ RateLimiter = &BucketRateLimiter{}
+
+func (r *BucketRateLimiter) When(item interface{}) time.Duration {
+ return r.Limiter.Reserve().Delay()
+}
+
+func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
+ return 0
+}
+
+func (r *BucketRateLimiter) Forget(item interface{}) {
+}
+
+// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit;
+// dealing with max failures and expiration is up to the caller.
+type ItemExponentialFailureRateLimiter struct {
+ failuresLock sync.Mutex
+ failures map[interface{}]int
+
+ baseDelay time.Duration
+ maxDelay time.Duration
+}
+
+var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
+
+func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
+ return &ItemExponentialFailureRateLimiter{
+ failures: map[interface{}]int{},
+ baseDelay: baseDelay,
+ maxDelay: maxDelay,
+ }
+}
+
+func DefaultItemBasedRateLimiter() RateLimiter {
+ return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
+}
+
+func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
+ r.failuresLock.Lock()
+ defer r.failuresLock.Unlock()
+
+ exp := r.failures[item]
+ r.failures[item] = r.failures[item] + 1
+
+ // The backoff is capped such that 'calculated' value never overflows.
+ backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
+ if backoff > math.MaxInt64 {
+ return r.maxDelay
+ }
+
+ calculated := time.Duration(backoff)
+ if calculated > r.maxDelay {
+ return r.maxDelay
+ }
+
+ return calculated
+}
+
+func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
+ r.failuresLock.Lock()
+ defer r.failuresLock.Unlock()
+
+ return r.failures[item]
+}
+
+func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
+ r.failuresLock.Lock()
+ defer r.failuresLock.Unlock()
+
+ delete(r.failures, item)
+}
+
+// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
+type ItemFastSlowRateLimiter struct {
+ failuresLock sync.Mutex
+ failures map[interface{}]int
+
+ maxFastAttempts int
+ fastDelay time.Duration
+ slowDelay time.Duration
+}
+
+var _ RateLimiter = &ItemFastSlowRateLimiter{}
+
+func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
+ return &ItemFastSlowRateLimiter{
+ failures: map[interface{}]int{},
+ fastDelay: fastDelay,
+ slowDelay: slowDelay,
+ maxFastAttempts: maxFastAttempts,
+ }
+}
+
+func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
+ r.failuresLock.Lock()
+ defer r.failuresLock.Unlock()
+
+ r.failures[item] = r.failures[item] + 1
+
+ if r.failures[item] <= r.maxFastAttempts {
+ return r.fastDelay
+ }
+
+ return r.slowDelay
+}
+
+func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
+ r.failuresLock.Lock()
+ defer r.failuresLock.Unlock()
+
+ return r.failures[item]
+}
+
+func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
+ r.failuresLock.Lock()
+ defer r.failuresLock.Unlock()
+
+ delete(r.failures, item)
+}
+
+// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
+// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
+// were separately delayed a longer time.
+type MaxOfRateLimiter struct {
+ limiters []RateLimiter
+}
+
+func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
+ ret := time.Duration(0)
+ for _, limiter := range r.limiters {
+ curr := limiter.When(item)
+ if curr > ret {
+ ret = curr
+ }
+ }
+
+ return ret
+}
+
+func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
+ return &MaxOfRateLimiter{limiters: limiters}
+}
+
+func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
+ ret := 0
+ for _, limiter := range r.limiters {
+ curr := limiter.NumRequeues(item)
+ if curr > ret {
+ ret = curr
+ }
+ }
+
+ return ret
+}
+
+func (r *MaxOfRateLimiter) Forget(item interface{}) {
+ for _, limiter := range r.limiters {
+ limiter.Forget(item)
+ }
+}
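
A hedged sketch of the default controller rate limiter driven directly, outside a queue, to show how per-item retries back off; the item key is a placeholder.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Exponential per-item backoff (5ms base, 1000s cap) combined with an overall
	// 10 qps / 100 burst token bucket, as built by the default constructor above.
	rl := workqueue.DefaultControllerRateLimiter()

	item := "sadis-entry-1" // illustrative work item key
	for i := 0; i < 4; i++ {
		// NumRequeues reports failures so far; each When records one more and
		// returns a delay that grows exponentially per retry.
		fmt.Printf("retry %d after %v\n", rl.NumRequeues(item), rl.When(item))
	}
	rl.Forget(item) // stop tracking once the item finally succeeds
}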
diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
new file mode 100644
index 0000000..31d9182
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
@@ -0,0 +1,280 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+ "container/heap"
+ "sync"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/clock"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
+// requeue items after failures without ending up in a hot-loop.
+type DelayingInterface interface {
+ Interface
+ // AddAfter adds an item to the workqueue after the indicated duration has passed
+ AddAfter(item interface{}, duration time.Duration)
+}
+
+// NewDelayingQueue constructs a new workqueue with delayed queuing ability
+func NewDelayingQueue() DelayingInterface {
+ return NewDelayingQueueWithCustomClock(clock.RealClock{}, "")
+}
+
+// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
+// inject custom queue Interface instead of the default one
+func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
+ return newDelayingQueue(clock.RealClock{}, q, name)
+}
+
+// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability
+func NewNamedDelayingQueue(name string) DelayingInterface {
+ return NewDelayingQueueWithCustomClock(clock.RealClock{}, name)
+}
+
+// NewDelayingQueueWithCustomClock constructs a new named workqueue
+// with ability to inject real or fake clock for testing purposes
+func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
+ return newDelayingQueue(clock, NewNamed(name), name)
+}
+
+func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType {
+ ret := &delayingType{
+ Interface: q,
+ clock: clock,
+ heartbeat: clock.NewTicker(maxWait),
+ stopCh: make(chan struct{}),
+ waitingForAddCh: make(chan *waitFor, 1000),
+ metrics: newRetryMetrics(name),
+ }
+
+ go ret.waitingLoop()
+ return ret
+}
+
+// delayingType wraps an Interface and provides delayed re-enqueuing
+type delayingType struct {
+ Interface
+
+ // clock tracks time for delayed firing
+ clock clock.Clock
+
+ // stopCh lets us signal a shutdown to the waiting loop
+ stopCh chan struct{}
+ // stopOnce guarantees we only signal shutdown a single time
+ stopOnce sync.Once
+
+ // heartbeat ensures we wait no more than maxWait before firing
+ heartbeat clock.Ticker
+
+ // waitingForAddCh is a buffered channel that feeds waitingForAdd
+ waitingForAddCh chan *waitFor
+
+ // metrics counts the number of retries
+ metrics retryMetrics
+}
+
+// waitFor holds the data to add and the time it should be added
+type waitFor struct {
+ data t
+ readyAt time.Time
+ // index in the priority queue (heap)
+ index int
+}
+
+// waitForPriorityQueue implements a priority queue for waitFor items.
+//
+// waitForPriorityQueue implements heap.Interface. The item occurring next in
+// time (i.e., the item with the smallest readyAt) is at the root (index 0).
+// Peek returns this minimum item at index 0. Pop returns the minimum item after
+// it has been removed from the queue and placed at index Len()-1 by
+// container/heap. Push adds an item at index Len(), and container/heap
+// percolates it into the correct location.
+type waitForPriorityQueue []*waitFor
+
+func (pq waitForPriorityQueue) Len() int {
+ return len(pq)
+}
+func (pq waitForPriorityQueue) Less(i, j int) bool {
+ return pq[i].readyAt.Before(pq[j].readyAt)
+}
+func (pq waitForPriorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+// Push adds an item to the queue. Push should not be called directly; instead,
+// use `heap.Push`.
+func (pq *waitForPriorityQueue) Push(x interface{}) {
+ n := len(*pq)
+ item := x.(*waitFor)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+// Pop removes an item from the queue. Pop should not be called directly;
+// instead, use `heap.Pop`.
+func (pq *waitForPriorityQueue) Pop() interface{} {
+ n := len(*pq)
+ item := (*pq)[n-1]
+ item.index = -1
+ *pq = (*pq)[0:(n - 1)]
+ return item
+}
+
+// Peek returns the item at the beginning of the queue, without removing the
+// item or otherwise mutating the queue. It is safe to call directly.
+func (pq waitForPriorityQueue) Peek() interface{} {
+ return pq[0]
+}
+
+// ShutDown stops the queue. After the queue drains, the returned shutdown bool
+// on Get() will be true. This method may be invoked more than once.
+func (q *delayingType) ShutDown() {
+ q.stopOnce.Do(func() {
+ q.Interface.ShutDown()
+ close(q.stopCh)
+ q.heartbeat.Stop()
+ })
+}
+
+// AddAfter adds the given item to the work queue after the given delay
+func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
+ // don't add if we're already shutting down
+ if q.ShuttingDown() {
+ return
+ }
+
+ q.metrics.retry()
+
+ // immediately add things with no delay
+ if duration <= 0 {
+ q.Add(item)
+ return
+ }
+
+ select {
+ case <-q.stopCh:
+ // unblock if ShutDown() is called
+ case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
+ }
+}
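
A typical caller-side use of AddAfter is to re-schedule a failed item with a fixed backoff. A hedged sketch, where the handler, delay, and package name are illustrative:

    package workers

    import (
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    // process drains the queue, retrying failed items after a fixed delay.
    // handle is a user-supplied function returning an error on transient failure.
    func process(q workqueue.DelayingInterface, handle func(interface{}) error) {
        for {
            item, shutdown := q.Get()
            if shutdown {
                return
            }
            if err := handle(item); err != nil {
                // Transient failure: try the same item again in five seconds.
                q.AddAfter(item, 5*time.Second)
            }
            q.Done(item)
        }
    }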
+
+// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
+// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
+// expired item sitting for more than 10 seconds.
+const maxWait = 10 * time.Second
+
+// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
+func (q *delayingType) waitingLoop() {
+ defer utilruntime.HandleCrash()
+
+ // Make a placeholder channel to use when there are no items in our list
+ never := make(<-chan time.Time)
+
+ // Make a timer that expires when the item at the head of the waiting queue is ready
+ var nextReadyAtTimer clock.Timer
+
+ waitingForQueue := &waitForPriorityQueue{}
+ heap.Init(waitingForQueue)
+
+ waitingEntryByData := map[t]*waitFor{}
+
+ for {
+ if q.Interface.ShuttingDown() {
+ return
+ }
+
+ now := q.clock.Now()
+
+ // Add ready entries
+ for waitingForQueue.Len() > 0 {
+ entry := waitingForQueue.Peek().(*waitFor)
+ if entry.readyAt.After(now) {
+ break
+ }
+
+ entry = heap.Pop(waitingForQueue).(*waitFor)
+ q.Add(entry.data)
+ delete(waitingEntryByData, entry.data)
+ }
+
+ // Set up a wait for the first item's readyAt (if one exists)
+ nextReadyAt := never
+ if waitingForQueue.Len() > 0 {
+ if nextReadyAtTimer != nil {
+ nextReadyAtTimer.Stop()
+ }
+ entry := waitingForQueue.Peek().(*waitFor)
+ nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
+ nextReadyAt = nextReadyAtTimer.C()
+ }
+
+ select {
+ case <-q.stopCh:
+ return
+
+ case <-q.heartbeat.C():
+ // continue the loop, which will add ready items
+
+ case <-nextReadyAt:
+ // continue the loop, which will add ready items
+
+ case waitEntry := <-q.waitingForAddCh:
+ if waitEntry.readyAt.After(q.clock.Now()) {
+ insert(waitingForQueue, waitingEntryByData, waitEntry)
+ } else {
+ q.Add(waitEntry.data)
+ }
+
+ drained := false
+ for !drained {
+ select {
+ case waitEntry := <-q.waitingForAddCh:
+ if waitEntry.readyAt.After(q.clock.Now()) {
+ insert(waitingForQueue, waitingEntryByData, waitEntry)
+ } else {
+ q.Add(waitEntry.data)
+ }
+ default:
+ drained = true
+ }
+ }
+ }
+ }
+}
+
+// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
+func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
+ // if the entry already exists, update the time only if it would cause the item to be queued sooner
+ existing, exists := knownEntries[entry.data]
+ if exists {
+ if existing.readyAt.After(entry.readyAt) {
+ existing.readyAt = entry.readyAt
+ heap.Fix(q, existing.index)
+ }
+
+ return
+ }
+
+ heap.Push(q, entry)
+ knownEntries[entry.data] = entry
+}
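
Because insert() deduplicates by item data and only ever moves an entry earlier, scheduling the same item twice yields a single delivery at the sooner readyAt. A small sketch of that behavior (item and delays are illustrative):

    package main

    import (
        "fmt"
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        q := workqueue.NewNamedDelayingQueue("dedup-example")
        defer q.ShutDown()

        // The same item scheduled twice is tracked once; the second call asks
        // for an earlier readyAt, so the entry is moved up, not duplicated.
        q.AddAfter("item-1", 500*time.Millisecond)
        q.AddAfter("item-1", 50*time.Millisecond)

        start := time.Now()
        item, _ := q.Get()
        fmt.Printf("got %v after ~%v\n", item, time.Since(start).Round(10*time.Millisecond))
        q.Done(item)

        // No second delivery is pending for the same data.
        fmt.Println("remaining:", q.Len()) // 0
    }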
diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go
new file mode 100644
index 0000000..a5c976e
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/doc.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package workqueue provides a simple queue that supports the following
+// features:
+// * Fair: items processed in the order in which they are added.
+// * Stingy: a single item will not be processed multiple times concurrently,
+// and if an item is added multiple times before it can be processed, it
+// will only be processed once.
+// * Multiple consumers and producers. In particular, it is allowed for an
+// item to be reenqueued while it is being processed.
+// * Shutdown notifications.
+package workqueue // import "k8s.io/client-go/util/workqueue"
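
A short sketch of the "Stingy" property described in the package comment: repeated Adds collapse into one entry, and an Add made while the item is in flight is only re-queued after Done. The item name is illustrative:

    package main

    import (
        "fmt"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        q := workqueue.New()
        defer q.ShutDown()

        // "Stingy": adding the same item repeatedly before it is picked up
        // results in a single entry on the queue.
        q.Add("onu-1")
        q.Add("onu-1")
        q.Add("onu-1")
        fmt.Println("queue length:", q.Len()) // 1

        item, _ := q.Get()
        // While "onu-1" is being processed, a re-add goes to the dirty set
        // and is only re-queued once Done is called.
        q.Add("onu-1")
        fmt.Println("length while processing:", q.Len()) // 0

        q.Done(item)
        fmt.Println("length after Done:", q.Len()) // 1
    }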
diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go
new file mode 100644
index 0000000..556e643
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go
@@ -0,0 +1,261 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+ "sync"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/clock"
+)
+
+// This file provides abstractions for plugging in a metrics provider
+// (e.g., Prometheus).
+
+type queueMetrics interface {
+ add(item t)
+ get(item t)
+ done(item t)
+ updateUnfinishedWork()
+}
+
+// GaugeMetric represents a single numerical value that can arbitrarily go up
+// and down.
+type GaugeMetric interface {
+ Inc()
+ Dec()
+}
+
+// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
+// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
+type SettableGaugeMetric interface {
+ Set(float64)
+}
+
+// CounterMetric represents a single numerical value that only ever
+// goes up.
+type CounterMetric interface {
+ Inc()
+}
+
+// SummaryMetric captures individual observations.
+type SummaryMetric interface {
+ Observe(float64)
+}
+
+// HistogramMetric counts individual observations.
+type HistogramMetric interface {
+ Observe(float64)
+}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc() {}
+func (noopMetric) Dec() {}
+func (noopMetric) Set(float64) {}
+func (noopMetric) Observe(float64) {}
+
+// defaultQueueMetrics expects the caller to lock before setting any metrics.
+type defaultQueueMetrics struct {
+ clock clock.Clock
+
+ // current depth of a workqueue
+ depth GaugeMetric
+ // total number of adds handled by a workqueue
+ adds CounterMetric
+ // how long an item stays in a workqueue
+ latency HistogramMetric
+ // how long processing an item from a workqueue takes
+ workDuration HistogramMetric
+ addTimes map[t]time.Time
+ processingStartTimes map[t]time.Time
+
+ // how long have current threads been working?
+ unfinishedWorkSeconds SettableGaugeMetric
+ longestRunningProcessor SettableGaugeMetric
+}
+
+func (m *defaultQueueMetrics) add(item t) {
+ if m == nil {
+ return
+ }
+
+ m.adds.Inc()
+ m.depth.Inc()
+ if _, exists := m.addTimes[item]; !exists {
+ m.addTimes[item] = m.clock.Now()
+ }
+}
+
+func (m *defaultQueueMetrics) get(item t) {
+ if m == nil {
+ return
+ }
+
+ m.depth.Dec()
+ m.processingStartTimes[item] = m.clock.Now()
+ if startTime, exists := m.addTimes[item]; exists {
+ m.latency.Observe(m.sinceInSeconds(startTime))
+ delete(m.addTimes, item)
+ }
+}
+
+func (m *defaultQueueMetrics) done(item t) {
+ if m == nil {
+ return
+ }
+
+ if startTime, exists := m.processingStartTimes[item]; exists {
+ m.workDuration.Observe(m.sinceInSeconds(startTime))
+ delete(m.processingStartTimes, item)
+ }
+}
+
+func (m *defaultQueueMetrics) updateUnfinishedWork() {
+ // Note that a summary metric would be better for this, but prometheus
+ // doesn't seem to have non-hacky ways to reset the summary metrics.
+ var total float64
+ var oldest float64
+ for _, t := range m.processingStartTimes {
+ age := m.sinceInSeconds(t)
+ total += age
+ if age > oldest {
+ oldest = age
+ }
+ }
+ m.unfinishedWorkSeconds.Set(total)
+ m.longestRunningProcessor.Set(oldest)
+}
+
+type noMetrics struct{}
+
+func (noMetrics) add(item t) {}
+func (noMetrics) get(item t) {}
+func (noMetrics) done(item t) {}
+func (noMetrics) updateUnfinishedWork() {}
+
+// Gets the time since the specified start in seconds.
+func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
+ return m.clock.Since(start).Seconds()
+}
+
+type retryMetrics interface {
+ retry()
+}
+
+type defaultRetryMetrics struct {
+ retries CounterMetric
+}
+
+func (m *defaultRetryMetrics) retry() {
+ if m == nil {
+ return
+ }
+
+ m.retries.Inc()
+}
+
+// MetricsProvider generates various metrics used by the queue.
+type MetricsProvider interface {
+ NewDepthMetric(name string) GaugeMetric
+ NewAddsMetric(name string) CounterMetric
+ NewLatencyMetric(name string) HistogramMetric
+ NewWorkDurationMetric(name string) HistogramMetric
+ NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
+ NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
+ NewRetriesMetric(name string) CounterMetric
+}
+
+type noopMetricsProvider struct{}
+
+func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
+ return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
+ return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
+ return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
+ return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
+ return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
+ return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
+ return noopMetric{}
+}
+
+var globalMetricsFactory = queueMetricsFactory{
+ metricsProvider: noopMetricsProvider{},
+}
+
+type queueMetricsFactory struct {
+ metricsProvider MetricsProvider
+
+ onlyOnce sync.Once
+}
+
+func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
+ f.onlyOnce.Do(func() {
+ f.metricsProvider = mp
+ })
+}
+
+func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
+ mp := f.metricsProvider
+ if len(name) == 0 || mp == (noopMetricsProvider{}) {
+ return noMetrics{}
+ }
+ return &defaultQueueMetrics{
+ clock: clock,
+ depth: mp.NewDepthMetric(name),
+ adds: mp.NewAddsMetric(name),
+ latency: mp.NewLatencyMetric(name),
+ workDuration: mp.NewWorkDurationMetric(name),
+ unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
+ longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
+ addTimes: map[t]time.Time{},
+ processingStartTimes: map[t]time.Time{},
+ }
+}
+
+func newRetryMetrics(name string) retryMetrics {
+ var ret *defaultRetryMetrics
+ if len(name) == 0 {
+ return ret
+ }
+ return &defaultRetryMetrics{
+ retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name),
+ }
+}
+
+// SetProvider sets the metrics provider for all subsequently created work
+// queues. Only the first call has an effect.
+func SetProvider(metricsProvider MetricsProvider) {
+ globalMetricsFactory.setProvider(metricsProvider)
+}
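
A minimal sketch of a custom MetricsProvider wired in through SetProvider; the log-backed metrics here are purely illustrative, not what client-go or this project actually registers:

    package metrics

    import (
        "log"

        "k8s.io/client-go/util/workqueue"
    )

    // logMetric satisfies all of the small metric interfaces above by simply
    // logging each update; a real provider would forward to Prometheus or similar.
    type logMetric struct{ name string }

    func (m logMetric) Inc()              { log.Printf("%s: inc", m.name) }
    func (m logMetric) Dec()              { log.Printf("%s: dec", m.name) }
    func (m logMetric) Set(v float64)     { log.Printf("%s: set %v", m.name, v) }
    func (m logMetric) Observe(v float64) { log.Printf("%s: observe %v", m.name, v) }

    // logProvider implements workqueue.MetricsProvider.
    type logProvider struct{}

    func (logProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
        return logMetric{name + "_depth"}
    }
    func (logProvider) NewAddsMetric(name string) workqueue.CounterMetric {
        return logMetric{name + "_adds"}
    }
    func (logProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
        return logMetric{name + "_latency"}
    }
    func (logProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
        return logMetric{name + "_work_duration"}
    }
    func (logProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
        return logMetric{name + "_unfinished"}
    }
    func (logProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
        return logMetric{name + "_longest_running"}
    }
    func (logProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
        return logMetric{name + "_retries"}
    }

    func init() {
        // Must run before the first named queue is constructed; only the
        // first SetProvider call takes effect.
        workqueue.SetProvider(logProvider{})
    }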
diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
new file mode 100644
index 0000000..366bf20
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+ "context"
+ "sync"
+
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+type DoWorkPieceFunc func(piece int)
+
+type options struct {
+ chunkSize int
+}
+
+type Options func(*options)
+
+// WithChunkSize allows handing chunks of work items to the workers, rather
+// than processing them one by one.
+// It is recommended to use this option if the number of pieces is significantly
+// higher than the number of workers and the work done for each item is small.
+func WithChunkSize(c int) func(*options) {
+ return func(o *options) {
+ o.chunkSize = c
+ }
+}
+
+// ParallelizeUntil is a framework that allows for parallelizing N
+// independent pieces of work until done or the context is canceled.
+func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
+ if pieces == 0 {
+ return
+ }
+ o := options{}
+ for _, opt := range opts {
+ opt(&o)
+ }
+ chunkSize := o.chunkSize
+ if chunkSize < 1 {
+ chunkSize = 1
+ }
+
+ chunks := ceilDiv(pieces, chunkSize)
+ toProcess := make(chan int, chunks)
+ for i := 0; i < chunks; i++ {
+ toProcess <- i
+ }
+ close(toProcess)
+
+ var stop <-chan struct{}
+ if ctx != nil {
+ stop = ctx.Done()
+ }
+ if chunks < workers {
+ workers = chunks
+ }
+ wg := sync.WaitGroup{}
+ wg.Add(workers)
+ for i := 0; i < workers; i++ {
+ go func() {
+ defer utilruntime.HandleCrash()
+ defer wg.Done()
+ for chunk := range toProcess {
+ start := chunk * chunkSize
+ end := start + chunkSize
+ if end > pieces {
+ end = pieces
+ }
+ for p := start; p < end; p++ {
+ select {
+ case <-stop:
+ return
+ default:
+ doWorkPiece(p)
+ }
+ }
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+func ceilDiv(a, b int) int {
+ return (a + b - 1) / b
+}
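
A usage sketch of ParallelizeUntil with WithChunkSize; the worker count, piece count, and chunk size are illustrative:

    package main

    import (
        "context"
        "fmt"
        "sync/atomic"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        results := make([]int64, 1000)
        var processed int64

        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // Square 1000 "pieces" with 8 workers; chunking reduces channel
        // traffic when individual pieces are cheap.
        workqueue.ParallelizeUntil(ctx, 8, len(results), func(i int) {
            results[i] = int64(i) * int64(i)
            atomic.AddInt64(&processed, 1)
        }, workqueue.WithChunkSize(25))

        fmt.Println("processed:", atomic.LoadInt64(&processed)) // 1000
    }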
diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go
new file mode 100644
index 0000000..39009b8
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/queue.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+ "sync"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/clock"
+)
+
+type Interface interface {
+ Add(item interface{})
+ Len() int
+ Get() (item interface{}, shutdown bool)
+ Done(item interface{})
+ ShutDown()
+ ShuttingDown() bool
+}
+
+// New constructs a new work queue (see the package comment).
+func New() *Type {
+ return NewNamed("")
+}
+
+func NewNamed(name string) *Type {
+ rc := clock.RealClock{}
+ return newQueue(
+ rc,
+ globalMetricsFactory.newQueueMetrics(name, rc),
+ defaultUnfinishedWorkUpdatePeriod,
+ )
+}
+
+func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
+ t := &Type{
+ clock: c,
+ dirty: set{},
+ processing: set{},
+ cond: sync.NewCond(&sync.Mutex{}),
+ metrics: metrics,
+ unfinishedWorkUpdatePeriod: updatePeriod,
+ }
+ go t.updateUnfinishedWorkLoop()
+ return t
+}
+
+const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
+
+// Type is a work queue (see the package comment).
+type Type struct {
+ // queue defines the order in which we will work on items. Every
+ // element of queue should be in the dirty set and not in the
+ // processing set.
+ queue []t
+
+ // dirty defines all of the items that need to be processed.
+ dirty set
+
+ // Things that are currently being processed are in the processing set.
+ // These things may be simultaneously in the dirty set. When we finish
+ // processing something and remove it from this set, we'll check if
+ // it's in the dirty set, and if so, add it to the queue.
+ processing set
+
+ cond *sync.Cond
+
+ shuttingDown bool
+
+ metrics queueMetrics
+
+ unfinishedWorkUpdatePeriod time.Duration
+ clock clock.Clock
+}
+
+type empty struct{}
+type t interface{}
+type set map[t]empty
+
+func (s set) has(item t) bool {
+ _, exists := s[item]
+ return exists
+}
+
+func (s set) insert(item t) {
+ s[item] = empty{}
+}
+
+func (s set) delete(item t) {
+ delete(s, item)
+}
+
+// Add marks item as needing processing.
+func (q *Type) Add(item interface{}) {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ if q.shuttingDown {
+ return
+ }
+ if q.dirty.has(item) {
+ return
+ }
+
+ q.metrics.add(item)
+
+ q.dirty.insert(item)
+ if q.processing.has(item) {
+ return
+ }
+
+ q.queue = append(q.queue, item)
+ q.cond.Signal()
+}
+
+// Len returns the current queue length, for informational purposes only. You
+// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
+// value, as that cannot be synchronized properly.
+func (q *Type) Len() int {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ return len(q.queue)
+}
+
+// Get blocks until it can return an item to be processed. If shutdown = true,
+// the caller should end their goroutine. You must call Done with item when you
+// have finished processing it.
+func (q *Type) Get() (item interface{}, shutdown bool) {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ for len(q.queue) == 0 && !q.shuttingDown {
+ q.cond.Wait()
+ }
+ if len(q.queue) == 0 {
+ // We must be shutting down.
+ return nil, true
+ }
+
+ item, q.queue = q.queue[0], q.queue[1:]
+
+ q.metrics.get(item)
+
+ q.processing.insert(item)
+ q.dirty.delete(item)
+
+ return item, false
+}
+
+// Done marks item as done processing, and if it has been marked as dirty again
+// while it was being processed, it will be re-added to the queue for
+// re-processing.
+func (q *Type) Done(item interface{}) {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+
+ q.metrics.done(item)
+
+ q.processing.delete(item)
+ if q.dirty.has(item) {
+ q.queue = append(q.queue, item)
+ q.cond.Signal()
+ }
+}
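
A sketch of the Get/Done contract described above: every item returned by Get is released with a deferred Done, and a re-Add during processing lands in the dirty set and is re-queued when Done runs. The handler is a placeholder:

    package workers

    import (
        "k8s.io/client-go/util/workqueue"
    )

    // runWorker loops over the queue until shutdown. handleItem is a
    // user-supplied function; returning an error triggers a retry.
    func runWorker(q workqueue.Interface, handleItem func(interface{}) error) {
        for {
            item, shutdown := q.Get()
            if shutdown {
                return
            }
            func() {
                defer q.Done(item)
                if err := handleItem(item); err != nil {
                    // Re-add for another attempt; since the item is still being
                    // processed it goes to the dirty set and is re-queued when
                    // Done runs.
                    q.Add(item)
                }
            }()
        }
    }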
+
+// ShutDown will cause q to ignore all new items added to it. As soon as the
+// worker goroutines have drained the existing items in the queue, they will be
+// instructed to exit.
+func (q *Type) ShutDown() {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ q.shuttingDown = true
+ q.cond.Broadcast()
+}
+
+func (q *Type) ShuttingDown() bool {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+
+ return q.shuttingDown
+}
+
+func (q *Type) updateUnfinishedWorkLoop() {
+ t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
+ defer t.Stop()
+ for range t.C() {
+ if !func() bool {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ if !q.shuttingDown {
+ q.metrics.updateUnfinishedWork()
+ return true
+ }
+ return false
+
+ }() {
+ return
+ }
+ }
+}
diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
new file mode 100644
index 0000000..8321876
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+// RateLimitingInterface is an interface that rate limits items being added to the queue.
+type RateLimitingInterface interface {
+ DelayingInterface
+
+ // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
+ AddRateLimited(item interface{})
+
+ // Forget indicates that an item is finished being retried. Whether the item failed
+ // permanently or succeeded, the rate limiter stops tracking it. This only clears the
+ // `rateLimiter`; you still have to call `Done` on the queue.
+ Forget(item interface{})
+
+ // NumRequeues returns how many times the item has been requeued
+ NumRequeues(item interface{}) int
+}
+
+// NewRateLimitingQueue constructs a new workqueue with rate-limited queuing ability.
+// Remember to call Forget! If you don't, you may end up tracking failures forever.
+func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
+ return &rateLimitingType{
+ DelayingInterface: NewDelayingQueue(),
+ rateLimiter: rateLimiter,
+ }
+}
+
+func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
+ return &rateLimitingType{
+ DelayingInterface: NewNamedDelayingQueue(name),
+ rateLimiter: rateLimiter,
+ }
+}
+
+// rateLimitingType wraps an Interface and provides rate-limited re-enqueuing
+type rateLimitingType struct {
+ DelayingInterface
+
+ rateLimiter RateLimiter
+}
+
+// AddRateLimited adds the item to the workqueue via AddAfter, using the delay the rate limiter returns
+func (q *rateLimitingType) AddRateLimited(item interface{}) {
+ q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
+}
+
+func (q *rateLimitingType) NumRequeues(item interface{}) int {
+ return q.rateLimiter.NumRequeues(item)
+}
+
+func (q *rateLimitingType) Forget(item interface{}) {
+ q.rateLimiter.Forget(item)
+}
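
A sketch of the retry pattern the Forget comment warns about: succeed and Forget, retry with AddRateLimited up to a budget, then drop. The handler and retry budget are illustrative; such a queue would typically be built with NewNamedRateLimitingQueue and one of the package's default rate limiters (e.g., DefaultControllerRateLimiter from default_rate_limiters.go, assuming it is vendored alongside):

    package workers

    import (
        "k8s.io/client-go/util/workqueue"
    )

    const maxRetries = 5

    // reconcile drains a rate-limited queue, retrying failed items with the
    // limiter's chosen delay and giving up after maxRetries attempts.
    func reconcile(q workqueue.RateLimitingInterface, handleItem func(interface{}) error) {
        for {
            item, shutdown := q.Get()
            if shutdown {
                return
            }
            err := handleItem(item)
            switch {
            case err == nil:
                // Success: stop the rate limiter from tracking this item.
                q.Forget(item)
            case q.NumRequeues(item) < maxRetries:
                // Transient failure: requeue after whatever delay the rate limiter picks.
                q.AddRateLimited(item)
            default:
                // Too many failures: drop the item and clear its retry history.
                q.Forget(item)
            }
            q.Done(item)
        }
    }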