Import of https://github.com/ciena/voltctl at commit 40d61fbf3f910ed4017cf67c9c79e8e1f82a33a5

Change-Id: I8464c59e60d76cb8612891db3303878975b5416c
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
new file mode 100644
index 0000000..dd06303
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -0,0 +1,1201 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/http2"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/watch"
+	restclientwatch "k8s.io/client-go/rest/watch"
+	"k8s.io/client-go/tools/metrics"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog"
+)
+
+var (
+	// longThrottleLatency defines the threshold for logging requests. All requests
+	// throttled for longer than longThrottleLatency will be logged.
+	longThrottleLatency = 50 * time.Millisecond
+)
+
+// HTTPClient is an interface for testing a request object.
+type HTTPClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// ResponseWrapper is an interface for getting a response.
+// The response may be accessed either as raw data (the whole output is read into memory) or as a stream.
+type ResponseWrapper interface {
+	DoRaw() ([]byte, error)
+	Stream() (io.ReadCloser, error)
+}
+
+// RequestConstructionError is returned when there's an error assembling a request.
+type RequestConstructionError struct {
+	Err error
+}
+
+// Error returns a textual description of 'r'.
+func (r *RequestConstructionError) Error() string {
+	return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+	// required
+	client HTTPClient
+	verb   string
+
+	baseURL     *url.URL
+	content     ContentConfig
+	serializers Serializers
+
+	// generic components accessible via method setters
+	pathPrefix string
+	subpath    string
+	params     url.Values
+	headers    http.Header
+
+	// structural elements of the request that are part of the Kubernetes API conventions
+	namespace    string
+	namespaceSet bool
+	resource     string
+	resourceName string
+	subresource  string
+	timeout      time.Duration
+
+	// output
+	err  error
+	body io.Reader
+
+	// This is only used for per-request timeouts, deadlines, and cancellations.
+	ctx context.Context
+
+	backoffMgr BackoffManager
+	throttle   flowcontrol.RateLimiter
+}
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
+	if backoff == nil {
+		klog.V(2).Infof("Not implementing request backoff strategy.")
+		backoff = &NoBackoff{}
+	}
+
+	pathPrefix := "/"
+	if baseURL != nil {
+		pathPrefix = path.Join(pathPrefix, baseURL.Path)
+	}
+	r := &Request{
+		client:      client,
+		verb:        verb,
+		baseURL:     baseURL,
+		pathPrefix:  path.Join(pathPrefix, versionedAPIPath),
+		content:     content,
+		serializers: serializers,
+		backoffMgr:  backoff,
+		throttle:    throttle,
+		timeout:     timeout,
+	}
+	switch {
+	case len(content.AcceptContentTypes) > 0:
+		r.SetHeader("Accept", content.AcceptContentTypes)
+	case len(content.ContentType) > 0:
+		r.SetHeader("Accept", content.ContentType+", */*")
+	}
+	return r
+}
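+// A minimal usage sketch (illustrative only; client, baseURL, content, serializers,
+// backoff and throttle are assumed to be supplied by the caller, normally via
+// RESTClient rather than constructed by hand):
+//
+//	req := NewRequest(client, "GET", baseURL, "/api/v1", content, serializers, backoff, throttle, 0).
+//		Namespace("default").
+//		Resource("pods").
+//		Name("my-pod")
+//	result := req.Do()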
+
+// Prefix adds segments to the beginning of the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+	return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.subpath = path.Join(r.subpath, path.Join(segments...))
+	return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(r.resource) != 0 {
+		r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+		return r
+	}
+	r.resource = resource
+	return r
+}
+
+// BackOff sets the request's backoff manager to the one specified,
+// or defaults to the stub implementation if nil is provided
+func (r *Request) BackOff(manager BackoffManager) *Request {
+	if manager == nil {
+		r.backoffMgr = &NoBackoff{}
+		return r
+	}
+
+	r.backoffMgr = manager
+	return r
+}
+
+// Throttle receives a rate-limiter and sets or replaces an existing request limiter
+func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
+	r.throttle = limiter
+	return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	subresource := path.Join(subresources...)
+	if len(r.subresource) != 0 {
+		r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.subresource, subresource)
+		return r
+	}
+	for _, s := range subresources {
+		if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+			r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+			return r
+		}
+	}
+	r.subresource = subresource
+	return r
+}
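+// For example (illustrative sketch), chaining
+// Resource("pods").Name("my-pod").SubResource("log") produces a request path
+// ending in .../pods/my-pod/log once URL() assembles the final URL.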
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(resourceName) == 0 {
+		r.err = fmt.Errorf("resource name may not be empty")
+		return r
+	}
+	if len(r.resourceName) != 0 {
+		r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+		return r
+	}
+	r.resourceName = resourceName
+	return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if r.namespaceSet {
+		r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+		return r
+	}
+	r.namespaceSet = true
+	r.namespace = namespace
+	return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+	if scoped {
+		return r.Namespace(namespace)
+	}
+	return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+	if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+		// preserve any trailing slashes for legacy behavior
+		r.pathPrefix += "/"
+	}
+	return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI.
+func (r *Request) RequestURI(uri string) *Request {
+	if r.err != nil {
+		return r
+	}
+	locator, err := url.Parse(uri)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	r.pathPrefix = locator.Path
+	if len(locator.Query()) > 0 {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		for k, v := range locator.Query() {
+			r.params[k] = v
+		}
+	}
+	return r
+}
+
+// Param creates a query parameter with the given string value.
+func (r *Request) Param(paramName, s string) *Request {
+	if r.err != nil {
+		return r
+	}
+	return r.setParam(paramName, s)
+}
+
+// VersionedParams will take the provided object, serialize it to a map[string][]string using the
+// implicit RESTClient API version and the default parameter codec, and then add those as parameters
+// to the request. Use this to provide versioned query parameters from client libraries.
+// VersionedParams will not write query parameters that have omitempty set and are empty. If a
+// parameter has already been set it is appended to (Params and VersionedParams are additive).
+func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
+	return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion)
+}
+
+// SpecificallyVersionedParams is like VersionedParams, but encodes the parameters
+// using the explicitly provided GroupVersion rather than the client's default.
+func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
+	if r.err != nil {
+		return r
+	}
+	params, err := codec.EncodeParameters(obj, version)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	for k, v := range params {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		r.params[k] = append(r.params[k], v...)
+	}
+	return r
+}
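+// As an illustrative sketch (parameterCodec is an assumed runtime.ParameterCodec,
+// e.g. one provided by a client's scheme package), list options can be turned
+// into query parameters like so:
+//
+//	r.VersionedParams(&metav1.ListOptions{LabelSelector: "app=demo"}, parameterCodec)
+//
+// which adds a labelSelector parameter to the request's query string.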
+
+func (r *Request) setParam(paramName, value string) *Request {
+	if r.params == nil {
+		r.params = make(url.Values)
+	}
+	r.params[paramName] = append(r.params[paramName], value)
+	return r
+}
+
+// SetHeader sets the named header on the request, replacing any previously set values.
+func (r *Request) SetHeader(key string, values ...string) *Request {
+	if r.headers == nil {
+		r.headers = http.Header{}
+	}
+	r.headers.Del(key)
+	for _, value := range values {
+		r.headers.Add(key, value)
+	}
+	return r
+}
+
+// Timeout makes the request use the given duration as an overall timeout for the
+// request. Additionally, if set, the value is passed as the "timeout" query parameter in the URL.
+func (r *Request) Timeout(d time.Duration) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.timeout = d
+	return r
+}
+
+// Body makes the request use obj as the body. Optional.
+// If obj is a string, try to read a file of that name.
+// If obj is a []byte, send it directly.
+// If obj is an io.Reader, use it directly.
+// If obj is a runtime.Object, marshal it correctly, and set Content-Type header.
+// If obj is a runtime.Object and nil, do nothing.
+// Otherwise, set an error.
+func (r *Request) Body(obj interface{}) *Request {
+	if r.err != nil {
+		return r
+	}
+	switch t := obj.(type) {
+	case string:
+		data, err := ioutil.ReadFile(t)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		glogBody("Request Body", data)
+		r.body = bytes.NewReader(data)
+	case []byte:
+		glogBody("Request Body", t)
+		r.body = bytes.NewReader(t)
+	case io.Reader:
+		r.body = t
+	case runtime.Object:
+		// callers may pass typed interface pointers, therefore we must check nil with reflection
+		if reflect.ValueOf(t).IsNil() {
+			return r
+		}
+		data, err := runtime.Encode(r.serializers.Encoder, t)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		glogBody("Request Body", data)
+		r.body = bytes.NewReader(data)
+		r.SetHeader("Content-Type", r.content.ContentType)
+	default:
+		r.err = fmt.Errorf("unknown type used for body: %+v", obj)
+	}
+	return r
+}
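+// Illustrative sketch of the supported body types (pod is a hypothetical
+// runtime.Object, raw is a []byte):
+//
+//	r.Body(pod)                 // encoded with the configured serializer; Content-Type is set
+//	r.Body(raw)                 // sent as-is
+//	r.Body("/path/to/manifest") // contents read from the named file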
+
+// Context adds a context to the request. Contexts are only used for
+// timeouts, deadlines, and cancellations.
+func (r *Request) Context(ctx context.Context) *Request {
+	r.ctx = ctx
+	return r
+}
+
+// URL returns the current working URL.
+func (r *Request) URL() *url.URL {
+	p := r.pathPrefix
+	if r.namespaceSet && len(r.namespace) > 0 {
+		p = path.Join(p, "namespaces", r.namespace)
+	}
+	if len(r.resource) != 0 {
+		p = path.Join(p, strings.ToLower(r.resource))
+	}
+	// Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed
+	if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 {
+		p = path.Join(p, r.resourceName, r.subresource, r.subpath)
+	}
+
+	finalURL := &url.URL{}
+	if r.baseURL != nil {
+		*finalURL = *r.baseURL
+	}
+	finalURL.Path = p
+
+	query := url.Values{}
+	for key, values := range r.params {
+		for _, value := range values {
+			query.Add(key, value)
+		}
+	}
+
+	// timeout is handled specially here.
+	if r.timeout != 0 {
+		query.Set("timeout", r.timeout.String())
+	}
+	finalURL.RawQuery = query.Encode()
+	return finalURL
+}
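+// For example (illustrative, assuming a versioned API path of /api/v1), a request
+// built with Namespace("default"), Resource("pods"), Name("my-pod") and
+// Timeout(30*time.Second) yields the URL path
+// /api/v1/namespaces/default/pods/my-pod with a timeout=30s query parameter.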
+
+// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
+// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
+// parameters will be reset. This creates a copy of the url so as not to change the
+// underlying object.
+func (r Request) finalURLTemplate() url.URL {
+	newParams := url.Values{}
+	v := []string{"{value}"}
+	for k := range r.params {
+		newParams[k] = v
+	}
+	r.params = newParams
+	url := r.URL()
+	segments := strings.Split(r.URL().Path, "/")
+	groupIndex := 0
+	index := 0
+	if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) {
+		groupIndex += len(strings.Split(r.baseURL.Path, "/"))
+	}
+	if groupIndex >= len(segments) {
+		return *url
+	}
+
+	const CoreGroupPrefix = "api"
+	const NamedGroupPrefix = "apis"
+	isCoreGroup := segments[groupIndex] == CoreGroupPrefix
+	isNamedGroup := segments[groupIndex] == NamedGroupPrefix
+	if isCoreGroup {
+		// checking the case of core group with /api/v1/... format
+		index = groupIndex + 2
+	} else if isNamedGroup {
+		// checking the case of named group with /apis/apps/v1/... format
+		index = groupIndex + 3
+	} else {
+		// This should not happen, since the only two possibilities are /api... and /apis...;
+		// this is just an outlet in case more API groups are added in the future:
+		// https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
+		// If an unrecognized API group name is encountered, return {prefix} for url.Path.
+		url.Path = "/{prefix}"
+		url.RawQuery = ""
+		return *url
+	}
+	switch {
+	// len(segments)-index == 1: a bare resource with no name; nothing to replace.
+	case len(segments)-index == 2:
+		// /$RESOURCE/$NAME: replace $NAME with {name}
+		segments[index+1] = "{name}"
+	case len(segments)-index == 3:
+		if segments[index+2] == "finalize" || segments[index+2] == "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+1] = "{name}"
+		} else {
+			// /namespaces/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
+			segments[index+1] = "{namespace}"
+		}
+	case len(segments)-index >= 4:
+		segments[index+1] = "{namespace}"
+		// /namespaces/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name}
+		if segments[index+3] != "finalize" && segments[index+3] != "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+3] = "{name}"
+		}
+	}
+	url.Path = path.Join(segments...)
+	return *url
+}
+
+func (r *Request) tryThrottle() {
+	now := time.Now()
+	if r.throttle != nil {
+		r.throttle.Accept()
+	}
+	if latency := time.Since(now); latency > longThrottleLatency {
+		klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+	}
+}
+
+// Watch attempts to begin watching the requested location.
+// Returns a watch.Interface, or an error.
+func (r *Request) Watch() (watch.Interface, error) {
+	return r.WatchWithSpecificDecoders(
+		func(body io.ReadCloser) streaming.Decoder {
+			framer := r.serializers.Framer.NewFrameReader(body)
+			return streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
+		},
+		r.serializers.Decoder,
+	)
+}
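+// A minimal consumption sketch (event handling is illustrative):
+//
+//	w, err := r.Watch()
+//	if err != nil {
+//		// handle error
+//	}
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//		_ = ev // process the watch.Event
+//	}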
+
+// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder.
+// In practice you want one "standard" decoder for the watch event wrapper and a separate decoder for the embedded content.
+// Returns a watch.Interface, or an error.
+func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) {
+	// We specifically don't want to rate limit watches, so we
+	// don't use r.throttle here.
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.serializers.Framer == nil {
+		return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
+	}
+
+	url := r.URL().String()
+	req, err := http.NewRequest(r.verb, url, r.body)
+	if err != nil {
+		return nil, err
+	}
+	if r.ctx != nil {
+		req = req.WithContext(r.ctx)
+	}
+	req.Header = r.headers
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+	r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+	resp, err := client.Do(req)
+	updateURLMetrics(r, resp, err)
+	if r.baseURL != nil {
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
+		}
+	}
+	if err != nil {
+		// The watch stream mechanism handles many common partial data errors, so closed
+		// connections can be retried in many cases.
+		if net.IsProbableEOF(err) {
+			return watch.NewEmptyWatch(), nil
+		}
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		defer resp.Body.Close()
+		if result := r.transformResponse(resp, req); result.err != nil {
+			return nil, result.err
+		}
+		return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
+	}
+	wrapperDecoder := wrapperDecoderFn(resp.Body)
+	return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil
+}
+
+// updateURLMetrics is a convenience function for pushing metrics.
+// It also handles corner cases for incomplete/invalid request data.
+func updateURLMetrics(req *Request, resp *http.Response, err error) {
+	url := "none"
+	if req.baseURL != nil {
+		url = req.baseURL.Host
+	}
+
+	// Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric
+	// system so we just report them as `<error>`.
+	if err != nil {
+		metrics.RequestResult.Increment("<error>", req.verb, url)
+	} else {
+		// Metrics for failure codes
+		metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url)
+	}
+}
+
+// Stream formats and executes the request, and offers streaming of the response.
+// It returns an io.ReadCloser that can be used to stream the response, or an error.
+// Any non-2xx http status code causes an error.  If we get a non-2xx code, we try to convert the body into an APIStatus object.
+// If we can, we return that as an error.  Otherwise, we create an error that lists the http status and the content of the response.
+func (r *Request) Stream() (io.ReadCloser, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+
+	r.tryThrottle()
+
+	url := r.URL().String()
+	req, err := http.NewRequest(r.verb, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	if r.ctx != nil {
+		req = req.WithContext(r.ctx)
+	}
+	req.Header = r.headers
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+	r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+	resp, err := client.Do(req)
+	updateURLMetrics(r, resp, err)
+	if r.baseURL != nil {
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	switch {
+	case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
+		return resp.Body, nil
+
+	default:
+		// ensure we close the body before returning the error
+		defer resp.Body.Close()
+
+		result := r.transformResponse(resp, req)
+		err := result.Error()
+		if err == nil {
+			err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
+		}
+		return nil, err
+	}
+}
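+// Typical use (illustrative sketch): the caller owns the returned ReadCloser and
+// must close it when done.
+//
+//	rc, err := r.Stream()
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rc.Close()
+//	// read from rc, e.g. io.Copy(dst, rc) for some destination writer dst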
+
+// request connects to the server and invokes the provided function when a server response is
+// received. It handles retry behavior and up front validation of requests. It will invoke
+// fn at most once. It will return an error if a problem occurred prior to connecting to the
+// server - the provided function is responsible for handling server errors.
+func (r *Request) request(fn func(*http.Request, *http.Response)) error {
+	// Metrics for total request latency
+	start := time.Now()
+	defer func() {
+		metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start))
+	}()
+
+	if r.err != nil {
+		klog.V(4).Infof("Error in request: %v", r.err)
+		return r.err
+	}
+
+	// TODO: added to catch programmer errors (invoking operations with an object with an empty namespace)
+	if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 {
+		return fmt.Errorf("an empty namespace may not be set when a resource name is provided")
+	}
+	if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 {
+		return fmt.Errorf("an empty namespace may not be set during creation")
+	}
+
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// Right now we make about ten retry attempts if we get a Retry-After response.
+	maxRetries := 10
+	retries := 0
+	for {
+		url := r.URL().String()
+		req, err := http.NewRequest(r.verb, url, r.body)
+		if err != nil {
+			return err
+		}
+		if r.timeout > 0 {
+			if r.ctx == nil {
+				r.ctx = context.Background()
+			}
+			var cancelFn context.CancelFunc
+			r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout)
+			defer cancelFn()
+		}
+		if r.ctx != nil {
+			req = req.WithContext(r.ctx)
+		}
+		req.Header = r.headers
+
+		r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+		if retries > 0 {
+			// We are retrying a request that we have already sent to the apiserver
+			// at least once before.
+			// This request should also be throttled with the client-internal throttler.
+			r.tryThrottle()
+		}
+		resp, err := client.Do(req)
+		updateURLMetrics(r, resp, err)
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode)
+		}
+		if err != nil {
+			// "Connection reset by peer" is usually a transient error.
+			// Thus in case of "GET" operations, we simply retry it.
+			// We are not automatically retrying "write" operations, as
+			// they are not idempotent.
+			if !net.IsConnectionReset(err) || r.verb != "GET" {
+				return err
+			}
+			// For the purpose of retry, we set the artificial "retry-after" response.
+			// TODO: Should we clean the original response if it exists?
+			resp = &http.Response{
+				StatusCode: http.StatusInternalServerError,
+				Header:     http.Header{"Retry-After": []string{"1"}},
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			}
+		}
+
+		done := func() bool {
+			// Ensure the response body is fully read and closed
+			// before we reconnect, so that we reuse the same TCP
+			// connection.
+			defer func() {
+				const maxBodySlurpSize = 2 << 10
+				if resp.ContentLength <= maxBodySlurpSize {
+					io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
+				}
+				resp.Body.Close()
+			}()
+
+			retries++
+			if seconds, wait := checkWait(resp); wait && retries < maxRetries {
+				if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
+					_, err := seeker.Seek(0, 0)
+					if err != nil {
+						klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
+						fn(req, resp)
+						return true
+					}
+				}
+
+				klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
+				r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
+				return false
+			}
+			fn(req, resp)
+			return true
+		}()
+		if done {
+			return nil
+		}
+	}
+}
+
+// Do formats and executes the request. Returns a Result object for easy response
+// processing.
+//
+// Error type:
+//  * If the request can't be constructed, or an error happened earlier while building its
+//    arguments: *RequestConstructionError
+//  * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
+//  * http.Client.Do errors are returned directly.
+func (r *Request) Do() Result {
+	r.tryThrottle()
+
+	var result Result
+	err := r.request(func(req *http.Request, resp *http.Response) {
+		result = r.transformResponse(resp, req)
+	})
+	if err != nil {
+		return Result{err: err}
+	}
+	return result
+}
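+// A common pattern (illustrative sketch; obj is a hypothetical runtime.Object
+// matching the expected response type):
+//
+//	var statusCode int
+//	err := r.Do().
+//		StatusCode(&statusCode).
+//		Into(obj)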
+
+// DoRaw executes the request but does not process the response body.
+func (r *Request) DoRaw() ([]byte, error) {
+	r.tryThrottle()
+
+	var result Result
+	err := r.request(func(req *http.Request, resp *http.Response) {
+		result.body, result.err = ioutil.ReadAll(resp.Body)
+		glogBody("Response Body", result.body)
+		if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
+			result.err = r.transformUnstructuredResponseError(resp, req, result.body)
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+	return result.body, result.err
+}
+
+// transformResponse converts an API response into a structured API object
+func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result {
+	var body []byte
+	if resp.Body != nil {
+		data, err := ioutil.ReadAll(resp.Body)
+		switch err.(type) {
+		case nil:
+			body = data
+		case http2.StreamError:
+			// This is trying to catch the scenario that the server may close the connection when sending the
+			// response body. This can be caused by server timeout due to a slow network connection.
+			// TODO: Add test for this. Steps may be:
+			// 1. client-go (or kubectl) sends a GET request.
+			// 2. Apiserver sends back the headers and then part of the body
+			// 3. Apiserver closes connection.
+			// 4. client-go should catch this and return an error.
+			klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
+			streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err)
+			return Result{
+				err: streamErr,
+			}
+		default:
+			klog.Errorf("Unexpected error when reading response body: %#v", err)
+			unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err)
+			return Result{
+				err: unexpectedErr,
+			}
+		}
+	}
+
+	glogBody("Response Body", body)
+
+	// verify the content type is accurate
+	contentType := resp.Header.Get("Content-Type")
+	decoder := r.serializers.Decoder
+	if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) {
+		mediaType, params, err := mime.ParseMediaType(contentType)
+		if err != nil {
+			return Result{err: errors.NewInternalError(err)}
+		}
+		decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params)
+		if err != nil {
+			// if we fail to negotiate a decoder, treat this as an unstructured error
+			switch {
+			case resp.StatusCode == http.StatusSwitchingProtocols:
+				// no-op, we've been upgraded
+			case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+				return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
+			}
+			return Result{
+				body:        body,
+				contentType: contentType,
+				statusCode:  resp.StatusCode,
+			}
+		}
+	}
+
+	switch {
+	case resp.StatusCode == http.StatusSwitchingProtocols:
+		// no-op, we've been upgraded
+	case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+		// calculate an unstructured error from the response which the Result object may use if the caller
+		// did not return a structured error.
+		retryAfter, _ := retryAfterSeconds(resp)
+		err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+		return Result{
+			body:        body,
+			contentType: contentType,
+			statusCode:  resp.StatusCode,
+			decoder:     decoder,
+			err:         err,
+		}
+	}
+
+	return Result{
+		body:        body,
+		contentType: contentType,
+		statusCode:  resp.StatusCode,
+		decoder:     decoder,
+	}
+}
+
+// truncateBody decides if the body should be truncated, based on the klog verbosity.
+func truncateBody(body string) string {
+	max := 0
+	switch {
+	case bool(klog.V(10)):
+		return body
+	case bool(klog.V(9)):
+		max = 10240
+	case bool(klog.V(8)):
+		max = 1024
+	}
+
+	if len(body) <= max {
+		return body
+	}
+
+	return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
+}
+
+// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against
+// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
+// whether the body is printable.
+func glogBody(prefix string, body []byte) {
+	if klog.V(8) {
+		if bytes.IndexFunc(body, func(r rune) bool {
+			return r < 0x0a
+		}) != -1 {
+			klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+		} else {
+			klog.Infof("%s: %s", prefix, truncateBody(string(body)))
+		}
+	}
+}
+
+// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error.
+const maxUnstructuredResponseTextBytes = 2048
+
+// transformUnstructuredResponseError handles an error from the server that is not in a structured form.
+// It is expected to transform any response that is not recognizable as a clear server sent error from the
+// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries
+// introduce a level of uncertainty into the responses returned by servers, which in common use
+// results in unexpected responses. The rough structure is:
+//
+// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
+//    - this is the happy path
+//    - when you get this output, trust what the server sends
+// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
+//    generate a reasonable facsimile of the original failure.
+//    - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
+// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
+// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
+//    initial contact, the presence of mismatched body contents from posted content types
+//    - Give these a separate distinct error type and capture as much as possible of the original message
+//
+// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
+func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
+	if body == nil && resp.Body != nil {
+		if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
+			body = data
+		}
+	}
+	retryAfter, _ := retryAfterSeconds(resp)
+	return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+}
+
+// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body.
+func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error {
+	// cap the amount of output we create
+	if len(body) > maxUnstructuredResponseTextBytes {
+		body = body[:maxUnstructuredResponseTextBytes]
+	}
+
+	message := "unknown"
+	if isTextResponse {
+		message = strings.TrimSpace(string(body))
+	}
+	var groupResource schema.GroupResource
+	if len(r.resource) > 0 {
+		groupResource.Group = r.content.GroupVersion.Group
+		groupResource.Resource = r.resource
+	}
+	return errors.NewGenericServerResponse(
+		statusCode,
+		method,
+		groupResource,
+		r.resourceName,
+		message,
+		retryAfter,
+		true,
+	)
+}
+
+// isTextResponse returns true if the response appears to be a textual media type.
+func isTextResponse(resp *http.Response) bool {
+	contentType := resp.Header.Get("Content-Type")
+	if len(contentType) == 0 {
+		return true
+	}
+	media, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return false
+	}
+	return strings.HasPrefix(media, "text/")
+}
+
+// checkWait returns true along with a number of seconds if the server instructed us to wait
+// before retrying.
+func checkWait(resp *http.Response) (int, bool) {
+	switch r := resp.StatusCode; {
+	// any 500 error code and 429 can trigger a wait
+	case r == http.StatusTooManyRequests, r >= 500:
+	default:
+		return 0, false
+	}
+	i, ok := retryAfterSeconds(resp)
+	return i, ok
+}
+
+// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
+// the header was missing or not a valid number.
+func retryAfterSeconds(resp *http.Response) (int, bool) {
+	if h := resp.Header.Get("Retry-After"); len(h) > 0 {
+		if i, err := strconv.Atoi(h); err == nil {
+			return i, true
+		}
+	}
+	return 0, false
+}
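+// For example, a 429 (Too Many Requests) or any 5xx response carrying a
+// "Retry-After: 5" header makes checkWait return (5, true); the retry loop in
+// request() then sleeps that many seconds before the next attempt.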
+
+// Result contains the result of calling Request.Do().
+type Result struct {
+	body        []byte
+	contentType string
+	err         error
+	statusCode  int
+
+	decoder runtime.Decoder
+}
+
+// Raw returns the raw result.
+func (r Result) Raw() ([]byte, error) {
+	return r.body, r.err
+}
+
+// Get returns the result as an object, which means it passes through the decoder.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Get() (runtime.Object, error) {
+	if r.err != nil {
+		// Check whether the result has a Status object in the body and prefer that.
+		return nil, r.Error()
+	}
+	if r.decoder == nil {
+		return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+	}
+
+	// decode, but if the result is Status return that as an error instead.
+	out, _, err := r.decoder.Decode(r.body, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	switch t := out.(type) {
+	case *metav1.Status:
+		// any status besides StatusSuccess is considered an error.
+		if t.Status != metav1.StatusSuccess {
+			return nil, errors.FromObject(t)
+		}
+	}
+	return out, nil
+}
+
+// StatusCode stores the HTTP status code of the request into the provided int
+// pointer and returns the Result. (Only valid if no error was returned.)
+func (r Result) StatusCode(statusCode *int) Result {
+	*statusCode = r.statusCode
+	return r
+}
+
+// Into stores the result into obj, if possible. If obj is nil it is ignored.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Into(obj runtime.Object) error {
+	if r.err != nil {
+		// Check whether the result has a Status object in the body and prefer that.
+		return r.Error()
+	}
+	if r.decoder == nil {
+		return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+	}
+	if len(r.body) == 0 {
+		return fmt.Errorf("0-length response with status code: %d and content type: %s",
+			r.statusCode, r.contentType)
+	}
+
+	out, _, err := r.decoder.Decode(r.body, nil, obj)
+	if err != nil || out == obj {
+		return err
+	}
+	// if a different object is returned, see if it is Status and avoid double decoding
+	// the object.
+	switch t := out.(type) {
+	case *metav1.Status:
+		// any status besides StatusSuccess is considered an error.
+		if t.Status != metav1.StatusSuccess {
+			return errors.FromObject(t)
+		}
+	}
+	return nil
+}
+
+// WasCreated updates the provided bool pointer to whether the server returned
+// 201 created or a different response.
+func (r Result) WasCreated(wasCreated *bool) Result {
+	*wasCreated = r.statusCode == http.StatusCreated
+	return r
+}
+
+// Error returns the error executing the request, nil if no error occurred.
+// If the returned object is of type Status and has Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+// See the Request.Do() comment for what errors you might get.
+func (r Result) Error() error {
+	// if we have received an unexpected server error, and we have a body and decoder, we can try to extract
+	// a Status object.
+	if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil {
+		return r.err
+	}
+
+	// attempt to convert the body into a Status object
+	// to be backwards compatible with old servers that do not return a version, default to "v1"
+	out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
+	if err != nil {
+		klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
+		return r.err
+	}
+	switch t := out.(type) {
+	case *metav1.Status:
+		// because we default the kind, we *must* check for StatusFailure
+		if t.Status == metav1.StatusFailure {
+			return errors.FromObject(t)
+		}
+	}
+	return r.err
+}
+
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+	for _, illegalName := range NameMayNotBe {
+		if name == illegalName {
+			return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+		}
+	}
+
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
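+// For example, IsValidPathSegmentName("..") returns a single "may not be '..'"
+// message, while IsValidPathSegmentName("a/b") reports that the name may not
+// contain '/'.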
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+	if prefix {
+		return IsValidPathSegmentPrefix(name)
+	}
+	return IsValidPathSegmentName(name)
+}