Import of https://github.com/ciena/voltctl at commit 40d61fbf3f910ed4017cf67c9c79e8e1f82a33a5
Change-Id: I8464c59e60d76cb8612891db3303878975b5416c
diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS
new file mode 100644
index 0000000..49dabc6
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/OWNERS
@@ -0,0 +1,26 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- smarterclayton
+- caesarxuchao
+- wojtek-t
+- deads2k
+- brendandburns
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- sttts
+- luxas
+- dims
+- errordeveloper
+- hongchaodeng
+- krousey
+- resouer
+- cjcullen
+- rmmh
+- lixiaobing10051267
+- asalkeld
+- juanvallejo
+- lojies
diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go
new file mode 100644
index 0000000..927403c
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/client.go
@@ -0,0 +1,258 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "fmt"
+ "mime"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/util/flowcontrol"
+)
+
+const (
+ // Environment variables: Note that the duration should be long enough that the backoff
+ // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1".
+ envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE"
+ envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION"
+)
+
+// Interface captures the set of operations for generically interacting with Kubernetes REST apis.
+type Interface interface {
+ GetRateLimiter() flowcontrol.RateLimiter
+ Verb(verb string) *Request
+ Post() *Request
+ Put() *Request
+ Patch(pt types.PatchType) *Request
+ Get() *Request
+ Delete() *Request
+ APIVersion() schema.GroupVersion
+}
+
+// RESTClient imposes common Kubernetes API conventions on a set of resource paths.
+// The baseURL is expected to point to an HTTP or HTTPS path that is the parent
+// of one or more resources. The server should return a decodable API resource
+// object, or an api.Status object which contains information about the reason for
+// any failure.
+//
+// Most consumers should use RESTClientFor to obtain a client from a Config.
+type RESTClient struct {
+ // base is the root URL for all invocations of the client
+ base *url.URL
+ // versionedAPIPath is a path segment connecting the base URL to the resource root
+ versionedAPIPath string
+
+ // contentConfig is the information used to communicate with the server.
+ contentConfig ContentConfig
+
+ // serializers contain all serializers for underlying content type.
+ serializers Serializers
+
+ // createBackoffMgr creates a BackoffManager that is passed to requests.
+ createBackoffMgr func() BackoffManager
+
+ // TODO extract this into a wrapper interface via the RESTClient interface in kubectl.
+ Throttle flowcontrol.RateLimiter
+
+ // Set specific behavior of the client. If not set http.DefaultClient will be used.
+ Client *http.Client
+}
+
+type Serializers struct {
+ Encoder runtime.Encoder
+ Decoder runtime.Decoder
+ StreamingSerializer runtime.Serializer
+ Framer runtime.Framer
+ RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error)
+}
+
+// NewRESTClient creates a new RESTClient. This client performs generic REST functions
+// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and
+// decoding of responses from the server.
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
+ base := *baseURL
+ if !strings.HasSuffix(base.Path, "/") {
+ base.Path += "/"
+ }
+ base.RawQuery = ""
+ base.Fragment = ""
+
+ if config.GroupVersion == nil {
+ config.GroupVersion = &schema.GroupVersion{}
+ }
+ if len(config.ContentType) == 0 {
+ config.ContentType = "application/json"
+ }
+ serializers, err := createSerializers(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var throttle flowcontrol.RateLimiter
+ if maxQPS > 0 && rateLimiter == nil {
+ throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst)
+ } else if rateLimiter != nil {
+ throttle = rateLimiter
+ }
+ return &RESTClient{
+ base: &base,
+ versionedAPIPath: versionedAPIPath,
+ contentConfig: config,
+ serializers: *serializers,
+ createBackoffMgr: readExpBackoffConfig,
+ Throttle: throttle,
+ Client: client,
+ }, nil
+}
+
+// GetRateLimiter returns the rate limiter for a given client, or nil if it's called on a nil client.
+func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
+ if c == nil {
+ return nil
+ }
+ return c.Throttle
+}
+
+// readExpBackoffConfig handles the internal logic of determining what the
+// backoff policy is. By default if no information is available, NoBackoff.
+// TODO Generalize this see #17727 .
+func readExpBackoffConfig() BackoffManager {
+ backoffBase := os.Getenv(envBackoffBase)
+ backoffDuration := os.Getenv(envBackoffDuration)
+
+ backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
+ backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
+ if errBase != nil || errDuration != nil {
+ return &NoBackoff{}
+ }
+ return &URLBackoff{
+ Backoff: flowcontrol.NewBackOff(
+ time.Duration(backoffBaseInt)*time.Second,
+ time.Duration(backoffDurationInt)*time.Second)}
+}
+
+// createSerializers creates all necessary serializers for given contentType.
+// TODO: the negotiated serializer passed to this method should probably return
+// serializers that control decoding and versioning without this package
+// being aware of the types. Depends on whether RESTClient must deal with
+// generic infrastructure.
+func createSerializers(config ContentConfig) (*Serializers, error) {
+ mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes()
+ contentType := config.ContentType
+ mediaType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err)
+ }
+ info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType)
+ if !ok {
+ if len(contentType) != 0 || len(mediaTypes) == 0 {
+ return nil, fmt.Errorf("no serializers registered for %s", contentType)
+ }
+ info = mediaTypes[0]
+ }
+
+ internalGV := schema.GroupVersions{
+ {
+ Group: config.GroupVersion.Group,
+ Version: runtime.APIVersionInternal,
+ },
+ // always include the legacy group as a decoding target to handle non-error `Status` return types
+ {
+ Group: "",
+ Version: runtime.APIVersionInternal,
+ },
+ }
+
+ s := &Serializers{
+ Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion),
+ Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV),
+
+ RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) {
+ info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType)
+ if !ok {
+ return nil, fmt.Errorf("serializer for %s not registered", contentType)
+ }
+ return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil
+ },
+ }
+ if info.StreamSerializer != nil {
+ s.StreamingSerializer = info.StreamSerializer.Serializer
+ s.Framer = info.StreamSerializer.Framer
+ }
+
+ return s, nil
+}
+
+// Verb begins a request with a verb (GET, POST, PUT, DELETE).
+//
+// Example usage of RESTClient's request building interface:
+// c, err := NewRESTClient(...)
+// if err != nil { ... }
+// result := c.Verb("GET").
+//  Resource("pods").
+//  Param("labelSelector", "area=staging").
+//  Timeout(10*time.Second).
+//  Do()
+// if result.Error() != nil { ... }
+// list := &api.PodList{}
+// err = result.Into(list)
+//
+func (c *RESTClient) Verb(verb string) *Request {
+ backoff := c.createBackoffMgr()
+
+ if c.Client == nil {
+ return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0)
+ }
+ return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout)
+}
+
+// Post begins a POST request. Short for c.Verb("POST").
+func (c *RESTClient) Post() *Request {
+ return c.Verb("POST")
+}
+
+// Put begins a PUT request. Short for c.Verb("PUT").
+func (c *RESTClient) Put() *Request {
+ return c.Verb("PUT")
+}
+
+// Patch begins a PATCH request. Short for c.Verb("PATCH").
+func (c *RESTClient) Patch(pt types.PatchType) *Request {
+ return c.Verb("PATCH").SetHeader("Content-Type", string(pt))
+}
+
+// Get begins a GET request. Short for c.Verb("GET").
+func (c *RESTClient) Get() *Request {
+ return c.Verb("GET")
+}
+
+// Delete begins a DELETE request. Short for c.Verb("DELETE").
+func (c *RESTClient) Delete() *Request {
+ return c.Verb("DELETE")
+}
+
+// APIVersion returns the APIVersion this RESTClient is expected to use.
+func (c *RESTClient) APIVersion() schema.GroupVersion {
+ return *c.contentConfig.GroupVersion
+}
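
Illustrative usage (not part of the vendored file): a minimal sketch of driving the RESTClient defined above through its verb helpers. The apiserver address, API path, and resource names are placeholder assumptions; a real client would normally be built via RESTClientFor from a Config.

package main

import (
	"fmt"
	"net/url"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
)

func main() {
	base, _ := url.Parse("https://127.0.0.1:6443") // placeholder apiserver address
	cfg := rest.ContentConfig{
		GroupVersion:         &schema.GroupVersion{Version: "v1"},
		NegotiatedSerializer: scheme.Codecs,
	}
	// NewRESTClient wires up serializers, rate limiting, and backoff.
	c, err := rest.NewRESTClient(base, "/api/v1", cfg, rest.DefaultQPS, rest.DefaultBurst, nil, nil)
	if err != nil {
		panic(err)
	}
	// Get() returns a *rest.Request builder; DoRaw() executes it and returns the raw body.
	raw, err := c.Get().Resource("pods").Namespace("default").DoRaw()
	fmt.Println(len(raw), err)
}
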
diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go
new file mode 100644
index 0000000..3f6b9bc
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/config.go
@@ -0,0 +1,551 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ gruntime "runtime"
+ "strings"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/pkg/version"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+ "k8s.io/client-go/transport"
+ certutil "k8s.io/client-go/util/cert"
+ "k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog"
+)
+
+const (
+ DefaultQPS float32 = 5.0
+ DefaultBurst int = 10
+)
+
+var ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type Config struct {
+ // Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must
+ // be appended to all request URIs used to access the apiserver. This allows a frontend
+ // proxy to easily relocate all of the apiserver endpoints.
+ Host string
+ // APIPath is a sub-path that points to an API root.
+ APIPath string
+
+ // ContentConfig contains settings that affect how objects are transformed when
+ // sent to the server.
+ ContentConfig
+
+ // Server requires Basic authentication
+ Username string
+ Password string
+
+ // Server requires Bearer authentication. This client will not attempt to use
+ // refresh tokens for an OAuth2 flow.
+ // TODO: demonstrate an OAuth2 compatible client.
+ BearerToken string
+
+ // Path to a file containing a BearerToken.
+ // If set, the contents are periodically read.
+ // The last successfully read value takes precedence over BearerToken.
+ BearerTokenFile string
+
+ // Impersonate is the configuration that RESTClient will use for impersonation.
+ Impersonate ImpersonationConfig
+
+ // Server requires plugin-specified authentication.
+ AuthProvider *clientcmdapi.AuthProviderConfig
+
+ // Callback to persist config for AuthProvider.
+ AuthConfigPersister AuthProviderConfigPersister
+
+ // Exec-based authentication provider.
+ ExecProvider *clientcmdapi.ExecConfig
+
+ // TLSClientConfig contains settings to enable transport layer security
+ TLSClientConfig
+
+ // UserAgent is an optional field that specifies the caller of this request.
+ UserAgent string
+
+ // Transport may be used for custom HTTP behavior. This attribute may not
+ // be specified with the TLS client certificate options. Use WrapTransport
+ // to provide additional per-server middleware behavior.
+ Transport http.RoundTripper
+ // WrapTransport will be invoked for custom HTTP behavior after the underlying
+ // transport is initialized (either the transport created from TLSClientConfig,
+ // Transport, or http.DefaultTransport). The config may layer other RoundTrippers
+ // on top of the returned RoundTripper.
+ //
+ // A future release will change this field to an array. Use config.Wrap()
+ // instead of setting this value directly.
+ WrapTransport transport.WrapperFunc
+
+ // QPS indicates the maximum QPS to the master from this client.
+ // If it's zero, the created RESTClient will use DefaultQPS: 5
+ QPS float32
+
+ // Maximum burst for throttle.
+ // If it's zero, the created RESTClient will use DefaultBurst: 10.
+ Burst int
+
+ // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst
+ RateLimiter flowcontrol.RateLimiter
+
+ // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
+ Timeout time.Duration
+
+ // Dial specifies the dial function for creating unencrypted TCP connections.
+ Dial func(ctx context.Context, network, address string) (net.Conn, error)
+
+ // Version forces a specific version to be used (if registered)
+ // Do we need this?
+ // Version string
+}
+
+var _ fmt.Stringer = new(Config)
+var _ fmt.GoStringer = new(Config)
+
+type sanitizedConfig *Config
+
+type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister }
+
+func (sanitizedAuthConfigPersister) GoString() string {
+ return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
+}
+func (sanitizedAuthConfigPersister) String() string {
+ return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
+}
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config
+// to prevent accidental leaking via logs.
+func (c *Config) GoString() string {
+ return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of Config to
+// prevent accidental leaking via logs.
+func (c *Config) String() string {
+ if c == nil {
+ return "<nil>"
+ }
+ cc := sanitizedConfig(CopyConfig(c))
+ // Explicitly mark non-empty credential fields as redacted.
+ if cc.Password != "" {
+ cc.Password = "--- REDACTED ---"
+ }
+ if cc.BearerToken != "" {
+ cc.BearerToken = "--- REDACTED ---"
+ }
+ if cc.AuthConfigPersister != nil {
+ cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister}
+ }
+
+ return fmt.Sprintf("%#v", cc)
+}
+
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+ // UserName is the username to impersonate on each request.
+ UserName string
+ // Groups are the groups to impersonate on each request.
+ Groups []string
+ // Extra is a free-form field which can be used to link some authentication information
+ // to authorization information. The key/value pairs here are impersonated on each request.
+ Extra map[string][]string
+}
+
+// +k8s:deepcopy-gen=true
+// TLSClientConfig contains settings to enable transport layer security
+type TLSClientConfig struct {
+ // Server should be accessed without verifying the TLS certificate. For testing only.
+ Insecure bool
+ // ServerName is passed to the server for SNI and is used in the client to check server
+ // certificates against. If ServerName is empty, the hostname used to contact the
+ // server is used.
+ ServerName string
+
+ // Server requires TLS client certificate authentication
+ CertFile string
+ // Server requires TLS client certificate authentication
+ KeyFile string
+ // Trusted root certificates for server
+ CAFile string
+
+ // CertData holds PEM-encoded bytes (typically read from a client certificate file).
+ // CertData takes precedence over CertFile
+ CertData []byte
+ // KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+ // KeyData takes precedence over KeyFile
+ KeyData []byte
+ // CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+ // CAData takes precedence over CAFile
+ CAData []byte
+}
+
+var _ fmt.Stringer = TLSClientConfig{}
+var _ fmt.GoStringer = TLSClientConfig{}
+
+type sanitizedTLSClientConfig TLSClientConfig
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// TLSClientConfig to prevent accidental leaking via logs.
+func (c TLSClientConfig) GoString() string {
+ return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of
+// TLSClientConfig to prevent accidental leaking via logs.
+func (c TLSClientConfig) String() string {
+ cc := sanitizedTLSClientConfig{
+ Insecure: c.Insecure,
+ ServerName: c.ServerName,
+ CertFile: c.CertFile,
+ KeyFile: c.KeyFile,
+ CAFile: c.CAFile,
+ CertData: c.CertData,
+ KeyData: c.KeyData,
+ CAData: c.CAData,
+ }
+ // Explicitly mark non-empty credential fields as redacted.
+ if len(cc.CertData) != 0 {
+ cc.CertData = []byte("--- TRUNCATED ---")
+ }
+ if len(cc.KeyData) != 0 {
+ cc.KeyData = []byte("--- REDACTED ---")
+ }
+ return fmt.Sprintf("%#v", cc)
+}
+
+type ContentConfig struct {
+ // AcceptContentTypes specifies the types the client will accept and is optional.
+ // If not set, ContentType will be used to define the Accept header
+ AcceptContentTypes string
+ // ContentType specifies the wire format used to communicate with the server.
+ // This value will be set as the Accept header on requests made to the server, and
+ // as the default content type on any object sent to the server. If not set,
+ // "application/json" is used.
+ ContentType string
+ // GroupVersion is the API version to talk to. Must be provided when initializing
+ // a RESTClient directly. When initializing a Client, will be set with the default
+ // code version.
+ GroupVersion *schema.GroupVersion
+ // NegotiatedSerializer is used for obtaining encoders and decoders for multiple
+ // supported media types.
+ NegotiatedSerializer runtime.NegotiatedSerializer
+}
+
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func RESTClientFor(config *Config) (*RESTClient, error) {
+ if config.GroupVersion == nil {
+ return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
+ }
+ if config.NegotiatedSerializer == nil {
+ return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+ }
+ qps := config.QPS
+ if config.QPS == 0.0 {
+ qps = DefaultQPS
+ }
+ burst := config.Burst
+ if config.Burst == 0 {
+ burst = DefaultBurst
+ }
+
+ baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ transport, err := TransportFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var httpClient *http.Client
+ if transport != http.DefaultTransport {
+ httpClient = &http.Client{Transport: transport}
+ if config.Timeout > 0 {
+ httpClient.Timeout = config.Timeout
+ }
+ }
+
+ return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
+}
+
+// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
+// the config.Version to be empty.
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
+ if config.NegotiatedSerializer == nil {
+ return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+ }
+
+ baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ transport, err := TransportFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ var httpClient *http.Client
+ if transport != http.DefaultTransport {
+ httpClient = &http.Client{Transport: transport}
+ if config.Timeout > 0 {
+ httpClient.Timeout = config.Timeout
+ }
+ }
+
+ versionConfig := config.ContentConfig
+ if versionConfig.GroupVersion == nil {
+ v := metav1.SchemeGroupVersion
+ versionConfig.GroupVersion = &v
+ }
+
+ return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
+}
+
+// SetKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+func SetKubernetesDefaults(config *Config) error {
+ if len(config.UserAgent) == 0 {
+ config.UserAgent = DefaultKubernetesUserAgent()
+ }
+ return nil
+}
+
+// adjustCommit returns sufficient significant figures of the commit's git hash.
+func adjustCommit(c string) string {
+ if len(c) == 0 {
+ return "unknown"
+ }
+ if len(c) > 7 {
+ return c[:7]
+ }
+ return c
+}
+
+// adjustVersion strips "alpha", "beta", etc. from version in form
+// major.minor.patch-[alpha|beta|etc].
+func adjustVersion(v string) string {
+ if len(v) == 0 {
+ return "unknown"
+ }
+ seg := strings.SplitN(v, "-", 2)
+ return seg[0]
+}
+
+// adjustCommand returns the last component of the
+// OS-specific command path for use in User-Agent.
+func adjustCommand(p string) string {
+ // Unlikely, but better than returning "".
+ if len(p) == 0 {
+ return "unknown"
+ }
+ return filepath.Base(p)
+}
+
+// buildUserAgent builds a User-Agent string from given args.
+func buildUserAgent(command, version, os, arch, commit string) string {
+ return fmt.Sprintf(
+ "%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit)
+}
+
+// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars.
+func DefaultKubernetesUserAgent() string {
+ return buildUserAgent(
+ adjustCommand(os.Args[0]),
+ adjustVersion(version.Get().GitVersion),
+ gruntime.GOOS,
+ gruntime.GOARCH,
+ adjustCommit(version.Get().GitCommit))
+}
+
+// InClusterConfig returns a config object which uses the service account
+// kubernetes gives to pods. It's intended for clients that expect to be
+// running inside a pod running on kubernetes. It will return ErrNotInCluster
+// if called from a process not running in a kubernetes environment.
+func InClusterConfig() (*Config, error) {
+ const (
+ tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ )
+ host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+ if len(host) == 0 || len(port) == 0 {
+ return nil, ErrNotInCluster
+ }
+
+ token, err := ioutil.ReadFile(tokenFile)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsClientConfig := TLSClientConfig{}
+
+ if _, err := certutil.NewPool(rootCAFile); err != nil {
+ klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+ } else {
+ tlsClientConfig.CAFile = rootCAFile
+ }
+
+ return &Config{
+ // TODO: switch to using cluster DNS.
+ Host: "https://" + net.JoinHostPort(host, port),
+ TLSClientConfig: tlsClientConfig,
+ BearerToken: string(token),
+ BearerTokenFile: tokenFile,
+ }, nil
+}
+
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor(). Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
+func IsConfigTransportTLS(config Config) bool {
+ baseURL, _, err := defaultServerUrlFor(&config)
+ if err != nil {
+ return false
+ }
+ return baseURL.Scheme == "https"
+}
+
+// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func LoadTLSFiles(c *Config) error {
+ var err error
+ c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+ if err != nil {
+ return err
+ }
+
+ c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+ if err != nil {
+ return err
+ }
+
+ c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file.
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+ if len(data) > 0 {
+ return data, nil
+ }
+ if len(file) > 0 {
+ fileData, err := ioutil.ReadFile(file)
+ if err != nil {
+ return []byte{}, err
+ }
+ return fileData, nil
+ }
+ return nil, nil
+}
+
+func AddUserAgent(config *Config, userAgent string) *Config {
+ fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
+ config.UserAgent = fullUserAgent
+ return config
+}
+
+// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed
+func AnonymousClientConfig(config *Config) *Config {
+ // copy only known safe fields
+ return &Config{
+ Host: config.Host,
+ APIPath: config.APIPath,
+ ContentConfig: config.ContentConfig,
+ TLSClientConfig: TLSClientConfig{
+ Insecure: config.Insecure,
+ ServerName: config.ServerName,
+ CAFile: config.TLSClientConfig.CAFile,
+ CAData: config.TLSClientConfig.CAData,
+ },
+ RateLimiter: config.RateLimiter,
+ UserAgent: config.UserAgent,
+ Transport: config.Transport,
+ WrapTransport: config.WrapTransport,
+ QPS: config.QPS,
+ Burst: config.Burst,
+ Timeout: config.Timeout,
+ Dial: config.Dial,
+ }
+}
+
+// CopyConfig returns a copy of the given config
+func CopyConfig(config *Config) *Config {
+ return &Config{
+ Host: config.Host,
+ APIPath: config.APIPath,
+ ContentConfig: config.ContentConfig,
+ Username: config.Username,
+ Password: config.Password,
+ BearerToken: config.BearerToken,
+ BearerTokenFile: config.BearerTokenFile,
+ Impersonate: ImpersonationConfig{
+ Groups: config.Impersonate.Groups,
+ Extra: config.Impersonate.Extra,
+ UserName: config.Impersonate.UserName,
+ },
+ AuthProvider: config.AuthProvider,
+ AuthConfigPersister: config.AuthConfigPersister,
+ ExecProvider: config.ExecProvider,
+ TLSClientConfig: TLSClientConfig{
+ Insecure: config.TLSClientConfig.Insecure,
+ ServerName: config.TLSClientConfig.ServerName,
+ CertFile: config.TLSClientConfig.CertFile,
+ KeyFile: config.TLSClientConfig.KeyFile,
+ CAFile: config.TLSClientConfig.CAFile,
+ CertData: config.TLSClientConfig.CertData,
+ KeyData: config.TLSClientConfig.KeyData,
+ CAData: config.TLSClientConfig.CAData,
+ },
+ UserAgent: config.UserAgent,
+ Transport: config.Transport,
+ WrapTransport: config.WrapTransport,
+ QPS: config.QPS,
+ Burst: config.Burst,
+ RateLimiter: config.RateLimiter,
+ Timeout: config.Timeout,
+ Dial: config.Dial,
+ }
+}
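
Illustrative only (not part of the vendored file): the Config type above is usually obtained either in-cluster or from a kubeconfig and then handed to a clientset constructor. The kubeconfig path below is an assumption for the sketch.

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Prefer the in-cluster service account when running inside a pod.
	config, err := rest.InClusterConfig()
	if err == rest.ErrNotInCluster {
		// Fall back to a kubeconfig file for out-of-cluster use (assumed path).
		kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
		config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
	}
	if err != nil {
		panic(err)
	}
	// QPS/Burst may be left zero; RESTClientFor falls back to DefaultQPS/DefaultBurst.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	fmt.Printf("client ready: %T\n", clientset)
}
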
diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go
new file mode 100644
index 0000000..83ef5ae
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/plugin.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "k8s.io/klog"
+
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+type AuthProvider interface {
+ // WrapTransport allows the plugin to create a modified RoundTripper that
+ // attaches authorization headers (or other info) to requests.
+ WrapTransport(http.RoundTripper) http.RoundTripper
+ // Login allows the plugin to initialize its configuration. It must not
+ // require direct user interaction.
+ Login() error
+}
+
+// Factory generates an AuthProvider plugin.
+// clusterAddress is the address of the current cluster.
+// config is the initial configuration for this plugin.
+// persister allows the plugin to save updated configuration.
+type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)
+
+// AuthProviderConfigPersister allows a plugin to persist configuration info
+// for just itself.
+type AuthProviderConfigPersister interface {
+ Persist(map[string]string) error
+}
+
+// All registered auth provider plugins.
+var pluginsLock sync.Mutex
+var plugins = make(map[string]Factory)
+
+func RegisterAuthProviderPlugin(name string, plugin Factory) error {
+ pluginsLock.Lock()
+ defer pluginsLock.Unlock()
+ if _, found := plugins[name]; found {
+ return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
+ }
+ klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
+ plugins[name] = plugin
+ return nil
+}
+
+func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) {
+ pluginsLock.Lock()
+ defer pluginsLock.Unlock()
+ p, ok := plugins[apc.Name]
+ if !ok {
+ return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name)
+ }
+ return p(clusterAddress, apc.Config, persister)
+}
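
Illustrative only (not part of the vendored file): a toy auth provider registered through the plugin hooks above. The plugin name "static-token" and the "token" config key are invented for the example.

package main

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// roundTripperFunc adapts a function to http.RoundTripper.
type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { return f(req) }

// staticTokenProvider injects a fixed bearer token into every request.
type staticTokenProvider struct{ token string }

func (p *staticTokenProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
	return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
		req = req.Clone(req.Context())
		req.Header.Set("Authorization", "Bearer "+p.token)
		return rt.RoundTrip(req)
	})
}

func (p *staticTokenProvider) Login() error { return nil }

func main() {
	// The factory receives the cluster address, the auth-provider config map from
	// kubeconfig, and a persister for saving refreshed credentials.
	err := rest.RegisterAuthProviderPlugin("static-token", func(clusterAddress string, cfg map[string]string, persister rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
		return &staticTokenProvider{token: cfg["token"]}, nil
	})
	if err != nil {
		panic(err)
	}
}
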
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
new file mode 100644
index 0000000..dd06303
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -0,0 +1,1201 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/http2"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+ "k8s.io/apimachinery/pkg/util/net"
+ "k8s.io/apimachinery/pkg/watch"
+ restclientwatch "k8s.io/client-go/rest/watch"
+ "k8s.io/client-go/tools/metrics"
+ "k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog"
+)
+
+var (
+ // longThrottleLatency defines the threshold for logging requests. All requests being
+ // throttled for more than longThrottleLatency will be logged.
+ longThrottleLatency = 50 * time.Millisecond
+)
+
+// HTTPClient is an interface for testing a request object.
+type HTTPClient interface {
+ Do(req *http.Request) (*http.Response, error)
+}
+
+// ResponseWrapper is an interface for getting a response.
+// The response may be accessed either as raw data (the whole output is read into memory) or as a stream.
+type ResponseWrapper interface {
+ DoRaw() ([]byte, error)
+ Stream() (io.ReadCloser, error)
+}
+
+// RequestConstructionError is returned when there's an error assembling a request.
+type RequestConstructionError struct {
+ Err error
+}
+
+// Error returns a textual description of 'r'.
+func (r *RequestConstructionError) Error() string {
+ return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+ // required
+ client HTTPClient
+ verb string
+
+ baseURL *url.URL
+ content ContentConfig
+ serializers Serializers
+
+ // generic components accessible via method setters
+ pathPrefix string
+ subpath string
+ params url.Values
+ headers http.Header
+
+ // structural elements of the request that are part of the Kubernetes API conventions
+ namespace string
+ namespaceSet bool
+ resource string
+ resourceName string
+ subresource string
+ timeout time.Duration
+
+ // output
+ err error
+ body io.Reader
+
+ // This is only used for per-request timeouts, deadlines, and cancellations.
+ ctx context.Context
+
+ backoffMgr BackoffManager
+ throttle flowcontrol.RateLimiter
+}
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
+ if backoff == nil {
+ klog.V(2).Infof("Not implementing request backoff strategy.")
+ backoff = &NoBackoff{}
+ }
+
+ pathPrefix := "/"
+ if baseURL != nil {
+ pathPrefix = path.Join(pathPrefix, baseURL.Path)
+ }
+ r := &Request{
+ client: client,
+ verb: verb,
+ baseURL: baseURL,
+ pathPrefix: path.Join(pathPrefix, versionedAPIPath),
+ content: content,
+ serializers: serializers,
+ backoffMgr: backoff,
+ throttle: throttle,
+ timeout: timeout,
+ }
+ switch {
+ case len(content.AcceptContentTypes) > 0:
+ r.SetHeader("Accept", content.AcceptContentTypes)
+ case len(content.ContentType) > 0:
+ r.SetHeader("Accept", content.ContentType+", */*")
+ }
+ return r
+}
+
+// Prefix adds segments to the relative beginning of the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+ return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.subpath = path.Join(r.subpath, path.Join(segments...))
+ return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if len(r.resource) != 0 {
+ r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+ return r
+ }
+ if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+ return r
+ }
+ r.resource = resource
+ return r
+}
+
+// BackOff sets the request's backoff manager to the one specified,
+// or defaults to the stub implementation if nil is provided
+func (r *Request) BackOff(manager BackoffManager) *Request {
+ if manager == nil {
+ r.backoffMgr = &NoBackoff{}
+ return r
+ }
+
+ r.backoffMgr = manager
+ return r
+}
+
+// Throttle receives a rate-limiter and sets or replaces an existing request limiter
+func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
+ r.throttle = limiter
+ return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ subresource := path.Join(subresources...)
+ if len(r.subresource) != 0 {
+ r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.subresource, subresource)
+ return r
+ }
+ for _, s := range subresources {
+ if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+ return r
+ }
+ }
+ r.subresource = subresource
+ return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if len(resourceName) == 0 {
+ r.err = fmt.Errorf("resource name may not be empty")
+ return r
+ }
+ if len(r.resourceName) != 0 {
+ r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+ return r
+ }
+ if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+ return r
+ }
+ r.resourceName = resourceName
+ return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if r.namespaceSet {
+ r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+ return r
+ }
+ if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+ return r
+ }
+ r.namespaceSet = true
+ r.namespace = namespace
+ return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+ if scoped {
+ return r.Namespace(namespace)
+ }
+ return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+ if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+ // preserve any trailing slashes for legacy behavior
+ r.pathPrefix += "/"
+ }
+ return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI.
+func (r *Request) RequestURI(uri string) *Request {
+ if r.err != nil {
+ return r
+ }
+ locator, err := url.Parse(uri)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ r.pathPrefix = locator.Path
+ if len(locator.Query()) > 0 {
+ if r.params == nil {
+ r.params = make(url.Values)
+ }
+ for k, v := range locator.Query() {
+ r.params[k] = v
+ }
+ }
+ return r
+}
+
+// Param creates a query parameter with the given string value.
+func (r *Request) Param(paramName, s string) *Request {
+ if r.err != nil {
+ return r
+ }
+ return r.setParam(paramName, s)
+}
+
+// VersionedParams will take the provided object, serialize it to a map[string][]string using the
+// implicit RESTClient API version and the default parameter codec, and then add those as parameters
+// to the request. Use this to provide versioned query parameters from client libraries.
+// VersionedParams will not write query parameters that have omitempty set and are empty. If a
+// parameter has already been set it is appended to (Params and VersionedParams are additive).
+func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
+ return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion)
+}
+
+func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
+ if r.err != nil {
+ return r
+ }
+ params, err := codec.EncodeParameters(obj, version)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ for k, v := range params {
+ if r.params == nil {
+ r.params = make(url.Values)
+ }
+ r.params[k] = append(r.params[k], v...)
+ }
+ return r
+}
+
+func (r *Request) setParam(paramName, value string) *Request {
+ if r.params == nil {
+ r.params = make(url.Values)
+ }
+ r.params[paramName] = append(r.params[paramName], value)
+ return r
+}
+
+func (r *Request) SetHeader(key string, values ...string) *Request {
+ if r.headers == nil {
+ r.headers = http.Header{}
+ }
+ r.headers.Del(key)
+ for _, value := range values {
+ r.headers.Add(key, value)
+ }
+ return r
+}
+
+// Timeout makes the request use the given duration as an overall timeout for the
+// request. Additionally, if set, the value is passed as the "timeout" parameter in the URL.
+func (r *Request) Timeout(d time.Duration) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.timeout = d
+ return r
+}
+
+// Body makes the request use obj as the body. Optional.
+// If obj is a string, try to read a file of that name.
+// If obj is a []byte, send it directly.
+// If obj is an io.Reader, use it directly.
+// If obj is a runtime.Object, marshal it correctly, and set Content-Type header.
+// If obj is a runtime.Object and nil, do nothing.
+// Otherwise, set an error.
+func (r *Request) Body(obj interface{}) *Request {
+ if r.err != nil {
+ return r
+ }
+ switch t := obj.(type) {
+ case string:
+ data, err := ioutil.ReadFile(t)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ glogBody("Request Body", data)
+ r.body = bytes.NewReader(data)
+ case []byte:
+ glogBody("Request Body", t)
+ r.body = bytes.NewReader(t)
+ case io.Reader:
+ r.body = t
+ case runtime.Object:
+ // callers may pass typed interface pointers, therefore we must check nil with reflection
+ if reflect.ValueOf(t).IsNil() {
+ return r
+ }
+ data, err := runtime.Encode(r.serializers.Encoder, t)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ glogBody("Request Body", data)
+ r.body = bytes.NewReader(data)
+ r.SetHeader("Content-Type", r.content.ContentType)
+ default:
+ r.err = fmt.Errorf("unknown type used for body: %+v", obj)
+ }
+ return r
+}
+
+// Context adds a context to the request. Contexts are only used for
+// timeouts, deadlines, and cancellations.
+func (r *Request) Context(ctx context.Context) *Request {
+ r.ctx = ctx
+ return r
+}
+
+// URL returns the current working URL.
+func (r *Request) URL() *url.URL {
+ p := r.pathPrefix
+ if r.namespaceSet && len(r.namespace) > 0 {
+ p = path.Join(p, "namespaces", r.namespace)
+ }
+ if len(r.resource) != 0 {
+ p = path.Join(p, strings.ToLower(r.resource))
+ }
+ // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed
+ if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 {
+ p = path.Join(p, r.resourceName, r.subresource, r.subpath)
+ }
+
+ finalURL := &url.URL{}
+ if r.baseURL != nil {
+ *finalURL = *r.baseURL
+ }
+ finalURL.Path = p
+
+ query := url.Values{}
+ for key, values := range r.params {
+ for _, value := range values {
+ query.Add(key, value)
+ }
+ }
+
+ // timeout is handled specially here.
+ if r.timeout != 0 {
+ query.Set("timeout", r.timeout.String())
+ }
+ finalURL.RawQuery = query.Encode()
+ return finalURL
+}
+
+// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
+// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
+// parameters will be reset. This creates a copy of the url so as not to change the
+// underlying object.
+func (r Request) finalURLTemplate() url.URL {
+ newParams := url.Values{}
+ v := []string{"{value}"}
+ for k := range r.params {
+ newParams[k] = v
+ }
+ r.params = newParams
+ url := r.URL()
+ segments := strings.Split(r.URL().Path, "/")
+ groupIndex := 0
+ index := 0
+ if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
+ groupIndex += len(strings.Split(r.baseURL.Path, "/"))
+ }
+ if groupIndex >= len(segments) {
+ return *url
+ }
+
+ const CoreGroupPrefix = "api"
+ const NamedGroupPrefix = "apis"
+ isCoreGroup := segments[groupIndex] == CoreGroupPrefix
+ isNamedGroup := segments[groupIndex] == NamedGroupPrefix
+ if isCoreGroup {
+ // checking the case of core group with /api/v1/... format
+ index = groupIndex + 2
+ } else if isNamedGroup {
+ // checking the case of named group with /apis/apps/v1/... format
+ index = groupIndex + 3
+ } else {
+ // this should not happen, since the only two possibilities are /api... and /apis...; this is just an
+ // outlet in case more API group prefixes are added in the future:
+ // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
+ // if an unrecognized API group prefix is encountered, return {prefix} for url.Path
+ url.Path = "/{prefix}"
+ url.RawQuery = ""
+ return *url
+ }
+ //switch segLength := len(segments) - index; segLength {
+ switch {
+ // case len(segments) - index == 1:
+ // resource (with no name) do nothing
+ case len(segments)-index == 2:
+ // /$RESOURCE/$NAME: replace $NAME with {name}
+ segments[index+1] = "{name}"
+ case len(segments)-index == 3:
+ if segments[index+2] == "finalize" || segments[index+2] == "status" {
+ // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+ segments[index+1] = "{name}"
+ } else {
+ // /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
+ segments[index+1] = "{namespace}"
+ }
+ case len(segments)-index >= 4:
+ segments[index+1] = "{namespace}"
+ // /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name}
+ if segments[index+3] != "finalize" && segments[index+3] != "status" {
+ // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+ segments[index+3] = "{name}"
+ }
+ }
+ url.Path = path.Join(segments...)
+ return *url
+}
+
+func (r *Request) tryThrottle() {
+ now := time.Now()
+ if r.throttle != nil {
+ r.throttle.Accept()
+ }
+ if latency := time.Since(now); latency > longThrottleLatency {
+ klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+ }
+}
+
+// Watch attempts to begin watching the requested location.
+// Returns a watch.Interface, or an error.
+func (r *Request) Watch() (watch.Interface, error) {
+ return r.WatchWithSpecificDecoders(
+ func(body io.ReadCloser) streaming.Decoder {
+ framer := r.serializers.Framer.NewFrameReader(body)
+ return streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
+ },
+ r.serializers.Decoder,
+ )
+}
+
+// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder.
+// It uses one "standard" decoder for the watch event envelope and a separate decoder for the embedded content.
+// Returns a watch.Interface, or an error.
+func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) {
+ // We specifically don't want to rate limit watches, so we
+ // don't use r.throttle here.
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.serializers.Framer == nil {
+ return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
+ }
+
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, r.body)
+ if err != nil {
+ return nil, err
+ }
+ if r.ctx != nil {
+ req = req.WithContext(r.ctx)
+ }
+ req.Header = r.headers
+ client := r.client
+ if client == nil {
+ client = http.DefaultClient
+ }
+ r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+ resp, err := client.Do(req)
+ updateURLMetrics(r, resp, err)
+ if r.baseURL != nil {
+ if err != nil {
+ r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
+ } else {
+ r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
+ }
+ }
+ if err != nil {
+ // The watch stream mechanism handles many common partial data errors, so closed
+ // connections can be retried in many cases.
+ if net.IsProbableEOF(err) {
+ return watch.NewEmptyWatch(), nil
+ }
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ defer resp.Body.Close()
+ if result := r.transformResponse(resp, req); result.err != nil {
+ return nil, result.err
+ }
+ return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
+ }
+ wrapperDecoder := wrapperDecoderFn(resp.Body)
+ return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil
+}
+
+// updateURLMetrics is a convenience function for pushing metrics.
+// It also handles corner cases for incomplete/invalid request data.
+func updateURLMetrics(req *Request, resp *http.Response, err error) {
+ url := "none"
+ if req.baseURL != nil {
+ url = req.baseURL.Host
+ }
+
+ // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric
+ // system so we just report them as `<error>`.
+ if err != nil {
+ metrics.RequestResult.Increment("<error>", req.verb, url)
+ } else {
+ // Metrics for all response status codes
+ metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url)
+ }
+}
+
+// Stream formats and executes the request, and offers streaming of the response.
+// Returns io.ReadCloser which could be used for streaming of the response, or an error
+// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
+// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
+func (r *Request) Stream() (io.ReadCloser, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+
+ r.tryThrottle()
+
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ if r.ctx != nil {
+ req = req.WithContext(r.ctx)
+ }
+ req.Header = r.headers
+ client := r.client
+ if client == nil {
+ client = http.DefaultClient
+ }
+ r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+ resp, err := client.Do(req)
+ updateURLMetrics(r, resp, err)
+ if r.baseURL != nil {
+ if err != nil {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+ } else {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ switch {
+ case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
+ return resp.Body, nil
+
+ default:
+ // ensure we close the body before returning the error
+ defer resp.Body.Close()
+
+ result := r.transformResponse(resp, req)
+ err := result.Error()
+ if err == nil {
+ err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
+ }
+ return nil, err
+ }
+}
+
+// request connects to the server and invokes the provided function when a server response is
+// received. It handles retry behavior and up front validation of requests. It will invoke
+// fn at most once. It will return an error if a problem occurred prior to connecting to the
+// server - the provided function is responsible for handling server errors.
+func (r *Request) request(fn func(*http.Request, *http.Response)) error {
+ // Metrics for total request latency
+ start := time.Now()
+ defer func() {
+ metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start))
+ }()
+
+ if r.err != nil {
+ klog.V(4).Infof("Error in request: %v", r.err)
+ return r.err
+ }
+
+ // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace)
+ if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 {
+ return fmt.Errorf("an empty namespace may not be set when a resource name is provided")
+ }
+ if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 {
+ return fmt.Errorf("an empty namespace may not be set during creation")
+ }
+
+ client := r.client
+ if client == nil {
+ client = http.DefaultClient
+ }
+
+ // Right now we make about ten retry attempts if we get a Retry-After response.
+ maxRetries := 10
+ retries := 0
+ for {
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, r.body)
+ if err != nil {
+ return err
+ }
+ if r.timeout > 0 {
+ if r.ctx == nil {
+ r.ctx = context.Background()
+ }
+ var cancelFn context.CancelFunc
+ r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout)
+ defer cancelFn()
+ }
+ if r.ctx != nil {
+ req = req.WithContext(r.ctx)
+ }
+ req.Header = r.headers
+
+ r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+ if retries > 0 {
+ // We are retrying the request that we already sent to the apiserver
+ // at least once before.
+ // This request should also be throttled with the client-internal throttler.
+ r.tryThrottle()
+ }
+ resp, err := client.Do(req)
+ updateURLMetrics(r, resp, err)
+ if err != nil {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+ } else {
+ r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+ }
+ if err != nil {
+ // "Connection reset by peer" is usually a transient error.
+ // Thus in case of "GET" operations, we simply retry it.
+ // We are not automatically retrying "write" operations, as
+ // they are not idempotent.
+ if !net.IsConnectionReset(err) || r.verb != "GET" {
+ return err
+ }
+ // For the purpose of retry, we set the artificial "retry-after" response.
+ // TODO: Should we clean the original response if it exists?
+ resp = &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Header: http.Header{"Retry-After": []string{"1"}},
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+
+ done := func() bool {
+ // Ensure the response body is fully read and closed
+ // before we reconnect, so that we reuse the same TCP
+ // connection.
+ defer func() {
+ const maxBodySlurpSize = 2 << 10
+ if resp.ContentLength <= maxBodySlurpSize {
+ io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
+ }
+ resp.Body.Close()
+ }()
+
+ retries++
+ if seconds, wait := checkWait(resp); wait && retries < maxRetries {
+ if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
+ _, err := seeker.Seek(0, 0)
+ if err != nil {
+ klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
+ fn(req, resp)
+ return true
+ }
+ }
+
+ klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
+ r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
+ return false
+ }
+ fn(req, resp)
+ return true
+ }()
+ if done {
+ return nil
+ }
+ }
+}
+
+// Do formats and executes the request. Returns a Result object for easy response
+// processing.
+//
+// Error type:
+// * If the request can't be constructed, or an error happened earlier while building its
+// arguments: *RequestConstructionError
+// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
+// * http.Client.Do errors are returned directly.
+func (r *Request) Do() Result {
+ r.tryThrottle()
+
+ var result Result
+ err := r.request(func(req *http.Request, resp *http.Response) {
+ result = r.transformResponse(resp, req)
+ })
+ if err != nil {
+ return Result{err: err}
+ }
+ return result
+}
+
+// DoRaw executes the request but does not process the response body.
+func (r *Request) DoRaw() ([]byte, error) {
+ r.tryThrottle()
+
+ var result Result
+ err := r.request(func(req *http.Request, resp *http.Response) {
+ result.body, result.err = ioutil.ReadAll(resp.Body)
+ glogBody("Response Body", result.body)
+ if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
+ result.err = r.transformUnstructuredResponseError(resp, req, result.body)
+ }
+ })
+ if err != nil {
+ return nil, err
+ }
+ return result.body, result.err
+}
+
+// transformResponse converts an API response into a structured API object
+func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
+ var body []byte
+ if resp.Body != nil {
+ data, err := ioutil.ReadAll(resp.Body)
+ switch err.(type) {
+ case nil:
+ body = data
+ case http2.StreamError:
+ // This is trying to catch the scenario that the server may close the connection when sending the
+ // response body. This can be caused by server timeout due to a slow network connection.
+ // TODO: Add test for this. Steps may be:
+ // 1. client-go (or kubectl) sends a GET request.
+ // 2. Apiserver sends back the headers and then part of the body
+ // 3. Apiserver closes connection.
+ // 4. client-go should catch this and return an error.
+ klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
+ streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err)
+ return Result{
+ err: streamErr,
+ }
+ default:
+ klog.Errorf("Unexpected error when reading response body: %#v", err)
+ unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err)
+ return Result{
+ err: unexpectedErr,
+ }
+ }
+ }
+
+ glogBody("Response Body", body)
+
+ // verify the content type is accurate
+ contentType := resp.Header.Get("Content-Type")
+ decoder := r.serializers.Decoder
+ if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) {
+ mediaType, params, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return Result{err: errors.NewInternalError(err)}
+ }
+ decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
+ if err != nil {
+ // if we fail to negotiate a decoder, treat this as an unstructured error
+ switch {
+ case resp.StatusCode == http.StatusSwitchingProtocols:
+ // no-op, we've been upgraded
+ case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+ return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
+ }
+ return Result{
+ body: body,
+ contentType: contentType,
+ statusCode: resp.StatusCode,
+ }
+ }
+ }
+
+ switch {
+ case resp.StatusCode == http.StatusSwitchingProtocols:
+ // no-op, we've been upgraded
+ case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+ // calculate an unstructured error from the response which the Result object may use if the caller
+ // did not return a structured error.
+ retryAfter, _ := retryAfterSeconds(resp)
+ err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+ return Result{
+ body: body,
+ contentType: contentType,
+ statusCode: resp.StatusCode,
+ decoder: decoder,
+ err: err,
+ }
+ }
+
+ return Result{
+ body: body,
+ contentType: contentType,
+ statusCode: resp.StatusCode,
+ decoder: decoder,
+ }
+}
+
+// truncateBody decides if the body should be truncated, based on the klog verbosity level.
+func truncateBody(body string) string {
+ max := 0
+ switch {
+ case bool(klog.V(10)):
+ return body
+ case bool(klog.V(9)):
+ max = 10240
+ case bool(klog.V(8)):
+ max = 1024
+ }
+
+ if len(body) <= max {
+ return body
+ }
+
+ return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
+}
+
+// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
+// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
+// whether the body is printable.
+func glogBody(prefix string, body []byte) {
+ if klog.V(8) {
+ if bytes.IndexFunc(body, func(r rune) bool {
+ return r < 0x0a
+ }) != -1 {
+ klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+ } else {
+ klog.Infof("%s: %s", prefix, truncateBody(string(body)))
+ }
+ }
+}
+
+// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error.
+const maxUnstructuredResponseTextBytes = 2048
+
+// transformUnstructuredResponseError handles an error from the server that is not in a structured form.
+// It is expected to transform any response that is not recognizable as a clear server-sent error from the
+// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries
+// introduce a level of uncertainty into the responses returned by servers, which in common use results in
+// unexpected responses. The rough structure is:
+//
+// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
+// - this is the happy path
+// - when you get this output, trust what the server sends
+// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
+// generate a reasonable facsimile of the original failure.
+// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
+// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
+// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
+// initial contact, the presence of mismatched body contents from posted content types
+// - Give these a separate distinct error type and capture as much as possible of the original message
+//
+// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
+func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
+ if body == nil && resp.Body != nil {
+ if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
+ body = data
+ }
+ }
+ retryAfter, _ := retryAfterSeconds(resp)
+ return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+}
+
+// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body.
+func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error {
+ // cap the amount of output we create
+ if len(body) > maxUnstructuredResponseTextBytes {
+ body = body[:maxUnstructuredResponseTextBytes]
+ }
+
+ message := "unknown"
+ if isTextResponse {
+ message = strings.TrimSpace(string(body))
+ }
+ var groupResource schema.GroupResource
+ if len(r.resource) > 0 {
+ groupResource.Group = r.content.GroupVersion.Group
+ groupResource.Resource = r.resource
+ }
+ return errors.NewGenericServerResponse(
+ statusCode,
+ method,
+ groupResource,
+ r.resourceName,
+ message,
+ retryAfter,
+ true,
+ )
+}
+
+// isTextResponse returns true if the response appears to be a textual media type.
+func isTextResponse(resp *http.Response) bool {
+ contentType := resp.Header.Get("Content-Type")
+ if len(contentType) == 0 {
+ return true
+ }
+ media, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return false
+ }
+ return strings.HasPrefix(media, "text/")
+}
+
+// checkWait returns the number of seconds the server instructed us to wait before retrying,
+// and true when such a wait is required.
+func checkWait(resp *http.Response) (int, bool) {
+ switch r := resp.StatusCode; {
+	// a 429 or any status code >= 500 can trigger a wait
+ case r == http.StatusTooManyRequests, r >= 500:
+ default:
+ return 0, false
+ }
+ i, ok := retryAfterSeconds(resp)
+ return i, ok
+}
+
+// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
+// the header was missing or not a valid number.
+func retryAfterSeconds(resp *http.Response) (int, bool) {
+ if h := resp.Header.Get("Retry-After"); len(h) > 0 {
+ if i, err := strconv.Atoi(h); err == nil {
+ return i, true
+ }
+ }
+ return 0, false
+}
+
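+// Illustrative sketch (editorial, not part of the upstream source): only 429
+// and >=500 responses trigger a wait, and only when the Retry-After header
+// parses as an integer number of seconds.
+//
+//	resp := &http.Response{
+//		StatusCode: http.StatusTooManyRequests,
+//		Header:     http.Header{"Retry-After": []string{"3"}},
+//	}
+//	seconds, wait := checkWait(resp) // seconds == 3, wait == true
+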
+// Result contains the result of calling Request.Do().
+type Result struct {
+ body []byte
+ contentType string
+ err error
+ statusCode int
+
+ decoder runtime.Decoder
+}
+
+// Raw returns the raw result.
+func (r Result) Raw() ([]byte, error) {
+ return r.body, r.err
+}
+
+// Get returns the result as an object, which means it passes through the decoder.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Get() (runtime.Object, error) {
+ if r.err != nil {
+ // Check whether the result has a Status object in the body and prefer that.
+ return nil, r.Error()
+ }
+ if r.decoder == nil {
+ return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+ }
+
+ // decode, but if the result is Status return that as an error instead.
+ out, _, err := r.decoder.Decode(r.body, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ switch t := out.(type) {
+ case *metav1.Status:
+ // any status besides StatusSuccess is considered an error.
+ if t.Status != metav1.StatusSuccess {
+ return nil, errors.FromObject(t)
+ }
+ }
+ return out, nil
+}
+
+// StatusCode stores the HTTP status code of the response into the provided int pointer and
+// returns the Result so calls can be chained. (The code is only valid if no error was returned.)
+func (r Result) StatusCode(statusCode *int) Result {
+ *statusCode = r.statusCode
+ return r
+}
+
+// Into stores the result into obj, if possible. If obj is nil it is ignored.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Into(obj runtime.Object) error {
+ if r.err != nil {
+ // Check whether the result has a Status object in the body and prefer that.
+ return r.Error()
+ }
+ if r.decoder == nil {
+ return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+ }
+ if len(r.body) == 0 {
+ return fmt.Errorf("0-length response with status code: %d and content type: %s",
+ r.statusCode, r.contentType)
+ }
+
+ out, _, err := r.decoder.Decode(r.body, nil, obj)
+ if err != nil || out == obj {
+ return err
+ }
+ // if a different object is returned, see if it is Status and avoid double decoding
+ // the object.
+ switch t := out.(type) {
+ case *metav1.Status:
+ // any status besides StatusSuccess is considered an error.
+ if t.Status != metav1.StatusSuccess {
+ return errors.FromObject(t)
+ }
+ }
+ return nil
+}
+
+// WasCreated sets the provided bool pointer to true if the server returned 201 Created,
+// and false otherwise, returning the Result so calls can be chained.
+func (r Result) WasCreated(wasCreated *bool) Result {
+ *wasCreated = r.statusCode == http.StatusCreated
+ return r
+}
+
+// Error returns the error executing the request, nil if no error occurred.
+// If the returned object is of type Status and has Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+// See the Request.Do() comment for what errors you might get.
+func (r Result) Error() error {
+ // if we have received an unexpected server error, and we have a body and decoder, we can try to extract
+ // a Status object.
+ if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil {
+ return r.err
+ }
+
+ // attempt to convert the body into a Status object
+ // to be backwards compatible with old servers that do not return a version, default to "v1"
+ out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
+ if err != nil {
+ klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
+ return r.err
+ }
+ switch t := out.(type) {
+ case *metav1.Status:
+ // because we default the kind, we *must* check for StatusFailure
+ if t.Status == metav1.StatusFailure {
+ return errors.FromObject(t)
+ }
+ }
+ return r.err
+}
+
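+// Illustrative sketch (editorial, not part of the upstream source): the Result
+// accessors return the Result itself where possible, so callers can capture the
+// status code and decode into an object in one chained statement. "req" and
+// "pod" are hypothetical.
+//
+//	var statusCode int
+//	err := req.Do().
+//		StatusCode(&statusCode).
+//		Into(pod) // pod is any runtime.Object the negotiated decoder understands
+//	if err != nil {
+//		// err may be a *errors.StatusError enriched from the body's Status object.
+//	}
+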
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+ for _, illegalName := range NameMayNotBe {
+ if name == illegalName {
+ return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+ }
+ }
+
+ var errors []string
+ for _, illegalContent := range NameMayNotContain {
+ if strings.Contains(name, illegalContent) {
+ errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+ }
+ }
+
+ return errors
+}
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+ var errors []string
+ for _, illegalContent := range NameMayNotContain {
+ if strings.Contains(name, illegalContent) {
+ errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+ }
+ }
+
+ return errors
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+ if prefix {
+ return IsValidPathSegmentPrefix(name)
+ }
+ return IsValidPathSegmentName(name)
+}
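+
+// Illustrative sketch (editorial, not part of the upstream source):
+// ValidatePathSegmentName returns human-readable violations, and an empty
+// slice when the name is safe to embed in a URL path.
+//
+//	msgs := ValidatePathSegmentName("foo/bar", false)
+//	// msgs == []string{`may not contain '/'`}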
diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go
new file mode 100644
index 0000000..bd5749d
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/transport.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "crypto/tls"
+ "errors"
+ "net/http"
+
+ "k8s.io/client-go/plugin/pkg/client/auth/exec"
+ "k8s.io/client-go/transport"
+)
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(config *Config) (*tls.Config, error) {
+ cfg, err := config.TransportConfig()
+ if err != nil {
+ return nil, err
+ }
+ return transport.TLSConfigFor(cfg)
+}
+
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return
+// http.DefaultTransport if no special-case behavior is needed.
+func TransportFor(config *Config) (http.RoundTripper, error) {
+ cfg, err := config.TransportConfig()
+ if err != nil {
+ return nil, err
+ }
+ return transport.New(cfg)
+}
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the
+// config. It is exposed for clients that need HTTP-like behavior but must then hijack
+// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use
+// the higher level TransportFor or RESTClientFor methods.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+ cfg, err := config.TransportConfig()
+ if err != nil {
+ return nil, err
+ }
+ return transport.HTTPWrappersForConfig(cfg, rt)
+}
+
+// TransportConfig converts a client config to an appropriate transport config.
+func (c *Config) TransportConfig() (*transport.Config, error) {
+ conf := &transport.Config{
+ UserAgent: c.UserAgent,
+ Transport: c.Transport,
+ WrapTransport: c.WrapTransport,
+ TLS: transport.TLSConfig{
+ Insecure: c.Insecure,
+ ServerName: c.ServerName,
+ CAFile: c.CAFile,
+ CAData: c.CAData,
+ CertFile: c.CertFile,
+ CertData: c.CertData,
+ KeyFile: c.KeyFile,
+ KeyData: c.KeyData,
+ },
+ Username: c.Username,
+ Password: c.Password,
+ BearerToken: c.BearerToken,
+ Impersonate: transport.ImpersonationConfig{
+ UserName: c.Impersonate.UserName,
+ Groups: c.Impersonate.Groups,
+ Extra: c.Impersonate.Extra,
+ },
+ Dial: c.Dial,
+ }
+
+ if c.ExecProvider != nil && c.AuthProvider != nil {
+ return nil, errors.New("execProvider and authProvider cannot be used in combination")
+ }
+
+ if c.ExecProvider != nil {
+ provider, err := exec.GetAuthenticator(c.ExecProvider)
+ if err != nil {
+ return nil, err
+ }
+ if err := provider.UpdateTransportConfig(conf); err != nil {
+ return nil, err
+ }
+ }
+ if c.AuthProvider != nil {
+ provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
+ if err != nil {
+ return nil, err
+ }
+ conf.Wrap(provider.WrapTransport)
+ }
+ return conf, nil
+}
+
+// Wrap adds a transport middleware function that will give the caller
+// an opportunity to wrap the underlying http.RoundTripper prior to the
+// first API call being made. The provided function is invoked after any
+// existing transport wrappers are invoked.
+func (c *Config) Wrap(fn transport.WrapperFunc) {
+ c.WrapTransport = transport.Wrappers(c.WrapTransport, fn)
+}
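+
+// Illustrative sketch (editorial, not part of the upstream source): building a
+// round tripper from a populated *Config and layering one extra wrapper on top.
+// "config" and loggingRoundTripper are hypothetical.
+//
+//	config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+//		return &loggingRoundTripper{delegate: rt}
+//	})
+//	rt, err := TransportFor(config)
+//	if err != nil {
+//		// handle the error
+//	}
+//	httpClient := &http.Client{Transport: rt}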
diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go
new file mode 100644
index 0000000..a56d183
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/url_utils.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "fmt"
+ "net/url"
+ "path"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) {
+ if host == "" {
+ return nil, "", fmt.Errorf("host must be a URL or a host:port pair")
+ }
+ base := host
+ hostURL, err := url.Parse(base)
+ if err != nil || hostURL.Scheme == "" || hostURL.Host == "" {
+ scheme := "http://"
+ if defaultTLS {
+ scheme = "https://"
+ }
+ hostURL, err = url.Parse(scheme + base)
+ if err != nil {
+ return nil, "", err
+ }
+ if hostURL.Path != "" && hostURL.Path != "/" {
+ return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base)
+ }
+ }
+
+ // hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to
+ // all URIs used to access the host. this is useful when there's a proxy in front of the
+ // apiserver that has relocated the apiserver endpoints, forwarding all requests from, for
+ // example, /a/b/c to the apiserver. in this case the Path should be /a/b/c.
+ //
+ // if running without a frontend proxy (that changes the location of the apiserver), then
+ // hostURL.Path should be blank.
+ //
+ // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base
+ versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion)
+
+ return hostURL, versionedAPIPath, nil
+}
+
+// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given
+// API path, following the standard conventions of the Kubernetes API.
+func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string {
+ versionedAPIPath := path.Join("/", apiPath)
+
+ // Add the version to the end of the path
+ if len(groupVersion.Group) > 0 {
+ versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version)
+
+ } else {
+ versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version)
+ }
+
+ return versionedAPIPath
+}
+
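+// Illustrative sketch (editorial, not part of the upstream source): the legacy
+// core group has an empty Group and lands under /api, while named groups land
+// under /apis.
+//
+//	corePath := DefaultVersionedAPIPath("/api", schema.GroupVersion{Version: "v1"})
+//	// corePath == "/api/v1"
+//	appsPath := DefaultVersionedAPIPath("/apis", schema.GroupVersion{Group: "apps", Version: "v1"})
+//	// appsPath == "/apis/apps/v1"
+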
+// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func defaultServerUrlFor(config *Config) (*url.URL, string, error) {
+ // TODO: move the default to secure when the apiserver supports TLS by default
+ // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+ hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+ hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+ defaultTLS := hasCA || hasCert || config.Insecure
+ host := config.Host
+ if host == "" {
+ host = "localhost"
+ }
+
+ if config.GroupVersion != nil {
+ return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS)
+ }
+ return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS)
+}
diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go
new file mode 100644
index 0000000..d00e42f
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/urlbackoff.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "net/url"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/client-go/util/flowcontrol"
+ "k8s.io/klog"
+)
+
+// serverIsOverloadedSet is the set of response codes that we back off for.
+// In general these should be errors that indicate a server is overloaded.
+// These shouldn't be configured by any user; we set them based on standard
+// HTTP conventions.
+var serverIsOverloadedSet = sets.NewInt(429)
+var maxResponseCode = 499
+
+type BackoffManager interface {
+ UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
+ CalculateBackoff(actualUrl *url.URL) time.Duration
+ Sleep(d time.Duration)
+}
+
+// URLBackoff implements, on top of flowcontrol.Backoff, the semantics
+// we need for URL-specific exponential backoff.
+type URLBackoff struct {
+ // Uses backoff as underlying implementation.
+ Backoff *flowcontrol.Backoff
+}
+
+// NoBackoff is a stub implementation that can be used for mocking or as a default.
+type NoBackoff struct {
+}
+
+func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+ // do nothing.
+}
+
+func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+ return 0 * time.Second
+}
+
+func (n *NoBackoff) Sleep(d time.Duration) {
+ time.Sleep(d)
+}
+
+// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
+// by tests which want to run 1000s of mock requests without slowing down.
+func (b *URLBackoff) Disable() {
+ klog.V(4).Infof("Disabling backoff strategy")
+ b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
+}
+
+// baseUrlKey returns the key that URLs will be mapped to.
+// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080.
+func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
+ // Simple implementation for now, just the host.
+ // We may backoff specific paths (i.e. "pods") differentially
+ // in the future.
+ host, err := url.Parse(rawurl.String())
+ if err != nil {
+ klog.V(4).Infof("Error extracting url: %v", rawurl)
+ panic("bad url!")
+ }
+ return host.Host
+}
+
+// UpdateBackoff updates backoff metadata
+func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+ // range for retry counts that we store is [0,13]
+ if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
+ b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
+ return
+ } else if responseCode >= 300 || err != nil {
+ klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
+ }
+
+	// If we got this far, there is no backoff required for this URL anymore.
+ b.Backoff.Reset(b.baseUrlKey(actualUrl))
+}
+
+// CalculateBackoff takes a URL and backs off exponentially,
+// based on its knowledge of existing failures.
+func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+ return b.Backoff.Get(b.baseUrlKey(actualUrl))
+}
+
+func (b *URLBackoff) Sleep(d time.Duration) {
+ b.Backoff.Clock.Sleep(d)
+}
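+
+// Illustrative sketch (editorial, not part of the upstream source): a
+// BackoffManager is consulted around each request; throttling responses grow
+// the per-host delay and successes reset it.
+//
+//	backoff := &URLBackoff{Backoff: flowcontrol.NewBackOff(1*time.Second, 120*time.Second)}
+//	u, _ := url.Parse("https://10.0.0.1:6443/api/v1/pods")
+//	backoff.Sleep(backoff.CalculateBackoff(u)) // zero on the first pass
+//	// ... issue the request, then record the outcome:
+//	backoff.UpdateBackoff(u, nil, 429) // grows the backoff for this host
+//	backoff.UpdateBackoff(u, nil, 200) // a success resets it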
diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go
new file mode 100644
index 0000000..73bb63a
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/watch/decoder.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// Decoder implements the watch.Decoder interface for io.ReadClosers whose
+// contents consist of a series of watchEvent objects encoded with the given
+// streaming decoder. The internal objects are then decoded by the embedded
+// decoder.
+type Decoder struct {
+ decoder streaming.Decoder
+ embeddedDecoder runtime.Decoder
+}
+
+// NewDecoder creates a Decoder from the given streaming decoder and embedded object decoder.
+func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {
+ return &Decoder{
+ decoder: decoder,
+ embeddedDecoder: embeddedDecoder,
+ }
+}
+
+// Decode blocks until it can return the next object in the reader. Returns an error
+// if the reader is closed or an object can't be decoded.
+func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
+ var got metav1.WatchEvent
+ res, _, err := d.decoder.Decode(nil, &got)
+ if err != nil {
+ return "", nil, err
+ }
+ if res != &got {
+ return "", nil, fmt.Errorf("unable to decode to metav1.Event")
+ }
+ switch got.Type {
+ case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error):
+ default:
+ return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type)
+ }
+
+ obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw)
+ if err != nil {
+ return "", nil, fmt.Errorf("unable to decode watch event: %v", err)
+ }
+ return watch.EventType(got.Type), obj, nil
+}
+
+// Close closes the underlying streaming decoder.
+func (d *Decoder) Close() {
+ d.decoder.Close()
+}
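+
+// Illustrative sketch (editorial, not part of the upstream source): a typical
+// consumer loop. "dec" is assumed to be a *Decoder built by NewDecoder around
+// a framed response body.
+//
+//	for {
+//		eventType, obj, err := dec.Decode()
+//		if err != nil {
+//			dec.Close()
+//			break // io.EOF or a decode failure ends the watch
+//		}
+//		switch eventType {
+//		case watch.Added, watch.Modified, watch.Deleted:
+//			_ = obj // handle the object
+//		case watch.Error:
+//			_ = obj // usually a *metav1.Status describing the failure
+//		}
+//	}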
diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go
new file mode 100644
index 0000000..e55aa12
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/watch/encoder.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioned
+
+import (
+ "encoding/json"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// Encoder serializes watch.Events into an io.Writer. The internal objects
+// are encoded using the embedded encoder, and the outer Event is serialized
+// using the streaming encoder.
+// TODO: this type is only used by tests
+type Encoder struct {
+ encoder streaming.Encoder
+ embeddedEncoder runtime.Encoder
+}
+
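+// NewEncoder creates an Encoder from the given streaming encoder and embedded object encoder.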
+func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder {
+ return &Encoder{
+ encoder: encoder,
+ embeddedEncoder: embeddedEncoder,
+ }
+}
+
+// Encode writes an event to the writer. Returns an error
+// if the writer is closed or an object can't be encoded.
+func (e *Encoder) Encode(event *watch.Event) error {
+ data, err := runtime.Encode(e.embeddedEncoder, event.Object)
+ if err != nil {
+ return err
+ }
+ // FIXME: get rid of json.RawMessage.
+ return e.encoder.Encode(&metav1.WatchEvent{
+ Type: string(event.Type),
+ Object: runtime.RawExtension{Raw: json.RawMessage(data)},
+ })
+}
diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
new file mode 100644
index 0000000..c1ab45f
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
@@ -0,0 +1,52 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package rest
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) {
+ *out = *in
+ if in.CertData != nil {
+ in, out := &in.CertData, &out.CertData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.KeyData != nil {
+ in, out := &in.KeyData, &out.KeyData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.CAData != nil {
+ in, out := &in.CAData, &out.CAData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSClientConfig.
+func (in *TLSClientConfig) DeepCopy() *TLSClientConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSClientConfig)
+ in.DeepCopyInto(out)
+ return out
+}