[VOL-3678] First implementation of the BBSim-sadis-server

Change-Id: I5077a8f861f4cc6af9759f31a4a415042c05eba3
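Note: this change only vendors k8s.io/apimachinery (and its Apache-2.0 license); the server code that consumes it is not part of this diff. As a rough illustration of why the api/errors helpers are pulled in, the sketch below shows how a sadis server could tolerate a missing ConfigMap by checking errors.IsNotFound instead of matching error strings. The namespace, ConfigMap name, helper name, and the use of client-go are illustrative assumptions, not part of this change.

    package main

    import (
    	"context"
    	"fmt"

    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/rest"
    )

    // loadSadisEntries fetches a ConfigMap holding sadis data and treats
    // "not found" as an empty configuration rather than a fatal error,
    // using the vendored apimachinery error helpers.
    func loadSadisEntries(ctx context.Context, ns, name string) (map[string]string, error) {
    	cfg, err := rest.InClusterConfig()
    	if err != nil {
    		return nil, err
    	}
    	client, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		return nil, err
    	}
    	cm, err := client.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
    	if apierrors.IsNotFound(err) {
    		return map[string]string{}, nil // nothing published yet
    	}
    	if err != nil {
    		return nil, err
    	}
    	return cm.Data, nil
    }

    func main() {
    	// "voltha" and "bbsim-sadis-config" are placeholder values.
    	data, err := loadSadisEntries(context.Background(), "voltha", "bbsim-sadis-config")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("loaded %d sadis entries\n", len(data))
    }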
diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
new file mode 100644
index 0000000..d18a178
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
@@ -0,0 +1,23 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- saad-ali
+- janetkuo
+- tallclair
+- dims
+- hongchaodeng
+- krousey
+- cjcullen
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
new file mode 100644
index 0000000..167baf6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors provides detailed error types for api field validation.
+package errors // import "k8s.io/apimachinery/pkg/api/errors"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
new file mode 100644
index 0000000..d3927d8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -0,0 +1,697 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// StatusError is an error intended for consumption by a REST API server; it can also be
+// reconstructed by clients from a REST response. Public to allow easy type switches.
+type StatusError struct {
+	ErrStatus metav1.Status
+}
+
+// APIStatus is exposed by errors that can be converted to an api.Status object
+// for finer grained details.
+type APIStatus interface {
+	Status() metav1.Status
+}
+
+var _ error = &StatusError{}
+
+// Error implements the Error interface.
+func (e *StatusError) Error() string {
+	return e.ErrStatus.Message
+}
+
+// Status allows access to e's status without having to know the detailed workings
+// of StatusError.
+func (e *StatusError) Status() metav1.Status {
+	return e.ErrStatus
+}
+
+// DebugError reports extended info about the error to debug output.
+func (e *StatusError) DebugError() (string, []interface{}) {
+	if out, err := json.MarshalIndent(e.ErrStatus, "", "  "); err == nil {
+		return "server response object: %s", []interface{}{string(out)}
+	}
+	return "server response object: %#v", []interface{}{e.ErrStatus}
+}
+
+// HasStatusCause returns true if the provided error has a details cause
+// with the provided type name.
+func HasStatusCause(err error, name metav1.CauseType) bool {
+	_, ok := StatusCause(err, name)
+	return ok
+}
+
+// StatusCause returns the named cause from the provided error if it exists and
+// the error is of the type APIStatus. Otherwise it returns an empty StatusCause and false.
+func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) {
+	apierr, ok := err.(APIStatus)
+	if !ok || apierr == nil || apierr.Status().Details == nil {
+		return metav1.StatusCause{}, false
+	}
+	for _, cause := range apierr.Status().Details.Causes {
+		if cause.Type == name {
+			return cause, true
+		}
+	}
+	return metav1.StatusCause{}, false
+}
+
+// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object.
+type UnexpectedObjectError struct {
+	Object runtime.Object
+}
+
+// Error returns an error message describing 'u'.
+func (u *UnexpectedObjectError) Error() string {
+	return fmt.Sprintf("unexpected object: %v", u.Object)
+}
+
+// FromObject generates a StatusError from a metav1.Status, if that is the type of obj; otherwise,
+// returns an UnexpectedObjectError.
+func FromObject(obj runtime.Object) error {
+	switch t := obj.(type) {
+	case *metav1.Status:
+		return &StatusError{ErrStatus: *t}
+	case runtime.Unstructured:
+		var status metav1.Status
+		obj := t.UnstructuredContent()
+		if !reflect.DeepEqual(obj["kind"], "Status") {
+			break
+		}
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil {
+			return err
+		}
+		if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" {
+			break
+		}
+		return &StatusError{ErrStatus: status}
+	}
+	return &UnexpectedObjectError{obj}
+}
+
+// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
+func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusNotFound,
+		Reason: metav1.StatusReasonNotFound,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name),
+	}}
+}
+
+// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
+func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusConflict,
+		Reason: metav1.StatusReasonAlreadyExists,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name),
+	}}
+}
+
+// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
+// action.
+func NewUnauthorized(reason string) *StatusError {
+	message := reason
+	if len(message) == 0 {
+		message = "not authorized"
+	}
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusUnauthorized,
+		Reason:  metav1.StatusReasonUnauthorized,
+		Message: message,
+	}}
+}
+
+// NewForbidden returns an error indicating the requested action was forbidden
+func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
+	var message string
+	if qualifiedResource.Empty() {
+		message = fmt.Sprintf("forbidden: %v", err)
+	} else if name == "" {
+		message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err)
+	} else {
+		message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err)
+	}
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusForbidden,
+		Reason: metav1.StatusReasonForbidden,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: message,
+	}}
+}
+
+// NewConflict returns an error indicating the item can't be updated as provided.
+func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusConflict,
+		Reason: metav1.StatusReasonConflict,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+		},
+		Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err),
+	}}
+}
+
+// NewApplyConflict returns an error including details on the request's apply conflicts
+func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError {
+	return &StatusError{ErrStatus: metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusConflict,
+		Reason: metav1.StatusReasonConflict,
+		Details: &metav1.StatusDetails{
+			// TODO: Get obj details here?
+			Causes: causes,
+		},
+		Message: message,
+	}}
+}
+
+// NewGone returns an error indicating the item is no longer available at the server and no forwarding address is known.
+// DEPRECATED: Please use NewResourceExpired instead.
+func NewGone(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusGone,
+		Reason:  metav1.StatusReasonGone,
+		Message: message,
+	}}
+}
+
+// NewResourceExpired creates an error that indicates that the requested resource content has expired from
+// the server (usually due to a resourceVersion that is too old).
+func NewResourceExpired(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusGone,
+		Reason:  metav1.StatusReasonExpired,
+		Message: message,
+	}}
+}
+
+// NewInvalid returns an error indicating the item is invalid and cannot be processed.
+func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
+	causes := make([]metav1.StatusCause, 0, len(errs))
+	for i := range errs {
+		err := errs[i]
+		causes = append(causes, metav1.StatusCause{
+			Type:    metav1.CauseType(err.Type),
+			Message: err.ErrorBody(),
+			Field:   err.Field,
+		})
+	}
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusUnprocessableEntity,
+		Reason: metav1.StatusReasonInvalid,
+		Details: &metav1.StatusDetails{
+			Group:  qualifiedKind.Group,
+			Kind:   qualifiedKind.Kind,
+			Name:   name,
+			Causes: causes,
+		},
+		Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()),
+	}}
+}
+
+// NewBadRequest creates an error that indicates that the request is invalid and can not be processed.
+func NewBadRequest(reason string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusBadRequest,
+		Reason:  metav1.StatusReasonBadRequest,
+		Message: reason,
+	}}
+}
+
+// NewTooManyRequests creates an error that indicates that the client must try again later because
+// the specified endpoint is not accepting requests. More specific details should be provided
+// if the client should know why the failure was limited.
+func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusTooManyRequests,
+		Reason:  metav1.StatusReasonTooManyRequests,
+		Message: message,
+		Details: &metav1.StatusDetails{
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+	}}
+}
+
+// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
+func NewServiceUnavailable(reason string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusServiceUnavailable,
+		Reason:  metav1.StatusReasonServiceUnavailable,
+		Message: reason,
+	}}
+}
+
+// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
+func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusMethodNotAllowed,
+		Reason: metav1.StatusReasonMethodNotAllowed,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+		},
+		Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()),
+	}}
+}
+
+// NewServerTimeout returns an error indicating the requested action could not be completed due to a
+// transient error, and the client should try again.
+func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusInternalServerError,
+		Reason: metav1.StatusReasonServerTimeout,
+		Details: &metav1.StatusDetails{
+			Group:             qualifiedResource.Group,
+			Kind:              qualifiedResource.Resource,
+			Name:              operation,
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+		Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()),
+	}}
+}
+
+// NewServerTimeoutForKind should not exist.  Server timeouts happen when accessing resources, the Kind is just what we
+// happened to be looking at when the request failed.  This delegates to keep code sane, but we should work towards removing this.
+func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError {
+	return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
+}
+
+// NewInternalError returns an error indicating that an internal server error occurred while processing the request.
+func NewInternalError(err error) *StatusError {
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   http.StatusInternalServerError,
+		Reason: metav1.StatusReasonInternalError,
+		Details: &metav1.StatusDetails{
+			Causes: []metav1.StatusCause{{Message: err.Error()}},
+		},
+		Message: fmt.Sprintf("Internal error occurred: %v", err),
+	}}
+}
+
+// NewTimeoutError returns an error indicating that a timeout occurred before the request
+// could be completed.  Clients may retry, but the operation may still complete.
+func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusGatewayTimeout,
+		Reason:  metav1.StatusReasonTimeout,
+		Message: fmt.Sprintf("Timeout: %s", message),
+		Details: &metav1.StatusDetails{
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+	}}
+}
+
+// NewTooManyRequestsError returns an error indicating that the request was rejected because
+// the server has received too many requests. Client should wait and retry. But if the request
+// is perishable, then the client should not retry the request.
+func NewTooManyRequestsError(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusTooManyRequests,
+		Reason:  metav1.StatusReasonTooManyRequests,
+		Message: fmt.Sprintf("Too many requests: %s", message),
+	}}
+}
+
+// NewRequestEntityTooLargeError returns an error indicating that the request
+// entity was too large.
+func NewRequestEntityTooLargeError(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusRequestEntityTooLarge,
+		Reason:  metav1.StatusReasonRequestEntityTooLarge,
+		Message: fmt.Sprintf("Request entity too large: %s", message),
+	}}
+}
+
+// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
+func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
+	reason := metav1.StatusReasonUnknown
+	message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
+	switch code {
+	case http.StatusConflict:
+		if verb == "POST" {
+			reason = metav1.StatusReasonAlreadyExists
+		} else {
+			reason = metav1.StatusReasonConflict
+		}
+		message = "the server reported a conflict"
+	case http.StatusNotFound:
+		reason = metav1.StatusReasonNotFound
+		message = "the server could not find the requested resource"
+	case http.StatusBadRequest:
+		reason = metav1.StatusReasonBadRequest
+		message = "the server rejected our request for an unknown reason"
+	case http.StatusUnauthorized:
+		reason = metav1.StatusReasonUnauthorized
+		message = "the server has asked for the client to provide credentials"
+	case http.StatusForbidden:
+		reason = metav1.StatusReasonForbidden
+		// the server message has details about who is trying to perform what action.  Keep its message.
+		message = serverMessage
+	case http.StatusNotAcceptable:
+		reason = metav1.StatusReasonNotAcceptable
+		// the server message has details about what types are acceptable
+		if len(serverMessage) == 0 || serverMessage == "unknown" {
+			message = "the server was unable to respond with a content type that the client supports"
+		} else {
+			message = serverMessage
+		}
+	case http.StatusUnsupportedMediaType:
+		reason = metav1.StatusReasonUnsupportedMediaType
+		// the server message has details about what types are acceptable
+		message = serverMessage
+	case http.StatusMethodNotAllowed:
+		reason = metav1.StatusReasonMethodNotAllowed
+		message = "the server does not allow this method on the requested resource"
+	case http.StatusUnprocessableEntity:
+		reason = metav1.StatusReasonInvalid
+		message = "the server rejected our request due to an error in our request"
+	case http.StatusServiceUnavailable:
+		reason = metav1.StatusReasonServiceUnavailable
+		message = "the server is currently unable to handle the request"
+	case http.StatusGatewayTimeout:
+		reason = metav1.StatusReasonTimeout
+		message = "the server was unable to return a response in the time allotted, but may still be processing the request"
+	case http.StatusTooManyRequests:
+		reason = metav1.StatusReasonTooManyRequests
+		message = "the server has received too many requests and has asked us to try again later"
+	default:
+		if code >= 500 {
+			reason = metav1.StatusReasonInternalError
+			message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage)
+		}
+	}
+	switch {
+	case !qualifiedResource.Empty() && len(name) > 0:
+		message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name)
+	case !qualifiedResource.Empty():
+		message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String())
+	}
+	var causes []metav1.StatusCause
+	if isUnexpectedResponse {
+		causes = []metav1.StatusCause{
+			{
+				Type:    metav1.CauseTypeUnexpectedServerResponse,
+				Message: serverMessage,
+			},
+		}
+	} else {
+		causes = nil
+	}
+	return &StatusError{metav1.Status{
+		Status: metav1.StatusFailure,
+		Code:   int32(code),
+		Reason: reason,
+		Details: &metav1.StatusDetails{
+			Group: qualifiedResource.Group,
+			Kind:  qualifiedResource.Resource,
+			Name:  name,
+
+			Causes:            causes,
+			RetryAfterSeconds: int32(retryAfterSeconds),
+		},
+		Message: message,
+	}}
+}
+
+// IsNotFound returns true if the specified error was created by NewNotFound.
+// It supports wrapped errors.
+func IsNotFound(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonNotFound
+}
+
+// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists.
+// It supports wrapped errors.
+func IsAlreadyExists(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonAlreadyExists
+}
+
+// IsConflict determines if the err is an error which indicates the provided update conflicts.
+// It supports wrapped errors.
+func IsConflict(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonConflict
+}
+
+// IsInvalid determines if the err is an error which indicates the provided resource is not valid.
+// It supports wrapped errors.
+func IsInvalid(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonInvalid
+}
+
+// IsGone is true if the error indicates the requested resource is no longer available.
+// It supports wrapped errors.
+func IsGone(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonGone
+}
+
+// IsResourceExpired is true if the error indicates the resource has expired and the current action is
+// no longer possible.
+// It supports wrapped errors.
+func IsResourceExpired(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonExpired
+}
+
+// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header
+// It supports wrapped errors.
+func IsNotAcceptable(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonNotAcceptable
+}
+
+// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header
+// It supports wrapped errors.
+func IsUnsupportedMediaType(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType
+}
+
+// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
+// be performed because it is not supported by the server.
+// It supports wrapped errors.
+func IsMethodNotSupported(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed
+}
+
+// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
+// It supports wrapped errors.
+func IsServiceUnavailable(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonServiceUnavailable
+}
+
+// IsBadRequest determines if err is an error which indicates that the request is invalid.
+// It supports wrapped errors.
+func IsBadRequest(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonBadRequest
+}
+
+// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and
+// requires authentication by the user.
+// It supports wrapped errors.
+func IsUnauthorized(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonUnauthorized
+}
+
+// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot
+// be completed as requested.
+// It supports wrapped errors.
+func IsForbidden(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonForbidden
+}
+
+// IsTimeout determines if err is an error which indicates that request times out due to long
+// processing.
+// It supports wrapped errors.
+func IsTimeout(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonTimeout
+}
+
+// IsServerTimeout determines if err is an error which indicates that the request needs to be retried
+// by the client.
+// It supports wrapped errors.
+func IsServerTimeout(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonServerTimeout
+}
+
+// IsInternalError determines if err is an error which indicates an internal server error.
+// It supports wrapped errors.
+func IsInternalError(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonInternalError
+}
+
+// IsTooManyRequests determines if err is an error which indicates that there are too many requests
+// that the server cannot handle.
+// It supports wrapped errors.
+func IsTooManyRequests(err error) bool {
+	if ReasonForError(err) == metav1.StatusReasonTooManyRequests {
+		return true
+	}
+	if status := APIStatus(nil); errors.As(err, &status) {
+		return status.Status().Code == http.StatusTooManyRequests
+	}
+	return false
+}
+
+// IsRequestEntityTooLargeError determines if err is an error which indicates
+// the request entity is too large.
+// It supports wrapped errors.
+func IsRequestEntityTooLargeError(err error) bool {
+	if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
+		return true
+	}
+	if status := APIStatus(nil); errors.As(err, &status) {
+		return status.Status().Code == http.StatusRequestEntityTooLarge
+	}
+	return false
+}
+
+// IsUnexpectedServerError returns true if the server response was not in the expected API format,
+// and may be the result of another HTTP actor.
+// It supports wrapped errors.
+func IsUnexpectedServerError(err error) bool {
+	if status := APIStatus(nil); errors.As(err, &status) && status.Status().Details != nil {
+		for _, cause := range status.Status().Details.Causes {
+			if cause.Type == metav1.CauseTypeUnexpectedServerResponse {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IsUnexpectedObjectError determines if err is due to an unexpected object from the master.
+// It supports wrapped errors.
+func IsUnexpectedObjectError(err error) bool {
+	uoe := &UnexpectedObjectError{}
+	return err != nil && errors.As(err, &uoe)
+}
+
+// SuggestsClientDelay returns true if this error suggests a client delay as well as the
+// suggested seconds to wait, or false if the error does not imply a wait. It does not
+// address whether the error *should* be retried, since some errors (like a 3xx) may
+// request delay without retry.
+// It supports wrapped errors.
+func SuggestsClientDelay(err error) (int, bool) {
+	if t := APIStatus(nil); errors.As(err, &t) && t.Status().Details != nil {
+		switch t.Status().Reason {
+		// this StatusReason explicitly requests the caller to delay the action
+		case metav1.StatusReasonServerTimeout:
+			return int(t.Status().Details.RetryAfterSeconds), true
+		}
+		// If the client requests that we retry after a certain number of seconds
+		if t.Status().Details.RetryAfterSeconds > 0 {
+			return int(t.Status().Details.RetryAfterSeconds), true
+		}
+	}
+	return 0, false
+}
+
+// ReasonForError returns the HTTP status for a particular error.
+// It supports wrapped errors.
+func ReasonForError(err error) metav1.StatusReason {
+	if status := APIStatus(nil); errors.As(err, &status) {
+		return status.Status().Reason
+	}
+	return metav1.StatusReasonUnknown
+}
+
+// ErrorReporter converts generic errors into runtime.Object errors without
+// requiring the caller to take a dependency on meta/v1 (where Status lives).
+// This prevents circular dependencies in core watch code.
+type ErrorReporter struct {
+	code   int
+	verb   string
+	reason string
+}
+
+// NewClientErrorReporter will respond with valid v1.Status objects that report
+// unexpected server responses. Primarily used by watch to report errors when
+// we attempt to decode a response from the server and it is not in the form
+// we expect. Because watch is a dependency of the core api, we can't return
+// meta/v1.Status in that package and so must inject this interface to convert a
+// generic error as appropriate. The reason is passed as a unique status cause
+// on the returned status, otherwise the generic "ClientError" is returned.
+func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter {
+	return &ErrorReporter{
+		code:   code,
+		verb:   verb,
+		reason: reason,
+	}
+}
+
+// AsObject returns a valid error runtime.Object (a v1.Status) for the given
+// error, using the code and verb of the reporter type. The error is set to
+// indicate that this was an unexpected server response.
+func (r *ErrorReporter) AsObject(err error) runtime.Object {
+	status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true)
+	if status.ErrStatus.Details == nil {
+		status.ErrStatus.Details = &metav1.StatusDetails{}
+	}
+	reason := r.reason
+	if len(reason) == 0 {
+		reason = "ClientError"
+	}
+	status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{
+		Type:    metav1.CauseType(reason),
+		Message: err.Error(),
+	})
+	return &status.ErrStatus
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
new file mode 100644
index 0000000..68b8d35
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -0,0 +1,21 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- derekwaynecarr
+- caesarxuchao
+- mikedanese
+- liggitt
+- nikhiljindal
+- gmarek
+- janetkuo
+- ncdc
+- dims
+- krousey
+- resouer
+- mfojtik
+- jianhuiz
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
new file mode 100644
index 0000000..934790d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// SetStatusCondition sets the corresponding condition in conditions to newCondition.
+// conditions must be non-nil.
+// 1. if the condition of the specified type already exists (all fields of the existing condition are updated to
+//    newCondition, LastTransitionTime is set to now if the new status differs from the old status)
+// 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended)
+func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) {
+	if conditions == nil {
+		return
+	}
+	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		if newCondition.LastTransitionTime.IsZero() {
+			newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		}
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		if !newCondition.LastTransitionTime.IsZero() {
+			existingCondition.LastTransitionTime = newCondition.LastTransitionTime
+		} else {
+			existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		}
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+// RemoveStatusCondition removes the corresponding conditionType from conditions.
+// conditions must be non-nil.
+func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) {
+	if conditions == nil || len(*conditions) == 0 {
+		return
+	}
+	newConditions := make([]metav1.Condition, 0, len(*conditions)-1)
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+// FindStatusCondition finds the conditionType in conditions.
+func FindStatusCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+// IsStatusConditionTrue returns true when the conditionType is present and set to `metav1.ConditionTrue`
+func IsStatusConditionTrue(conditions []metav1.Condition, conditionType string) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue)
+}
+
+// IsStatusConditionFalse returns true when the conditionType is present and set to `metav1.ConditionFalse`
+func IsStatusConditionFalse(conditions []metav1.Condition, conditionType string) bool {
+	return IsStatusConditionPresentAndEqual(conditions, conditionType, metav1.ConditionFalse)
+}
+
+// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
+func IsStatusConditionPresentAndEqual(conditions []metav1.Condition, conditionType string, status metav1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			return condition.Status == status
+		}
+	}
+	return false
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
new file mode 100644
index 0000000..b6d42ac
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package meta provides functions for retrieving API metadata from objects
+// belonging to the Kubernetes API
+package meta // import "k8s.io/apimachinery/pkg/api/meta"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
new file mode 100644
index 0000000..cbf5d02
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource
+type AmbiguousResourceError struct {
+	PartialResource schema.GroupVersionResource
+
+	MatchingResources []schema.GroupVersionResource
+	MatchingKinds     []schema.GroupVersionKind
+}
+
+func (e *AmbiguousResourceError) Error() string {
+	switch {
+	case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds)
+	case len(e.MatchingKinds) > 0:
+		return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds)
+	case len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources)
+	}
+	return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource)
+}
+
+// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind
+type AmbiguousKindError struct {
+	PartialKind schema.GroupVersionKind
+
+	MatchingResources []schema.GroupVersionResource
+	MatchingKinds     []schema.GroupVersionKind
+}
+
+func (e *AmbiguousKindError) Error() string {
+	switch {
+	case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds)
+	case len(e.MatchingKinds) > 0:
+		return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds)
+	case len(e.MatchingResources) > 0:
+		return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources)
+	}
+	return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind)
+}
+
+func IsAmbiguousError(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err.(type) {
+	case *AmbiguousResourceError, *AmbiguousKindError:
+		return true
+	default:
+		return false
+	}
+}
+
+// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource
+type NoResourceMatchError struct {
+	PartialResource schema.GroupVersionResource
+}
+
+func (e *NoResourceMatchError) Error() string {
+	return fmt.Sprintf("no matches for %v", e.PartialResource)
+}
+
+// NoKindMatchError is returned if the RESTMapper can't find any match for a kind
+type NoKindMatchError struct {
+	// GroupKind is the API group and kind that was searched
+	GroupKind schema.GroupKind
+	// SearchedVersions is the optional list of versions the search was restricted to
+	SearchedVersions []string
+}
+
+func (e *NoKindMatchError) Error() string {
+	searchedVersions := sets.NewString()
+	for _, v := range e.SearchedVersions {
+		searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String())
+	}
+
+	switch len(searchedVersions) {
+	case 0:
+		return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group)
+	case 1:
+		return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0])
+	default:
+		return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List())
+	}
+}
+
+func IsNoMatchError(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err.(type) {
+	case *NoResourceMatchError, *NoKindMatchError:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
new file mode 100644
index 0000000..fd22100
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the
+// first successful result for the singular requests
+type FirstHitRESTMapper struct {
+	MultiRESTMapper
+}
+
+func (m FirstHitRESTMapper) String() string {
+	return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper)
+}
+
+func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	errors := []error{}
+	for _, t := range m.MultiRESTMapper {
+		ret, err := t.ResourceFor(resource)
+		if err == nil {
+			return ret, nil
+		}
+		errors = append(errors, err)
+	}
+
+	return schema.GroupVersionResource{}, collapseAggregateErrors(errors)
+}
+
+func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	errors := []error{}
+	for _, t := range m.MultiRESTMapper {
+		ret, err := t.KindFor(resource)
+		if err == nil {
+			return ret, nil
+		}
+		errors = append(errors, err)
+	}
+
+	return schema.GroupVersionKind{}, collapseAggregateErrors(errors)
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	errors := []error{}
+	for _, t := range m.MultiRESTMapper {
+		ret, err := t.RESTMapping(gk, versions...)
+		if err == nil {
+			return ret, nil
+		}
+		errors = append(errors, err)
+	}
+
+	return nil, collapseAggregateErrors(errors)
+}
+
+// collapseAggregateErrors returns the minimal errors. It handles empty as nil, handles one item in a list
+// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same)
+func collapseAggregateErrors(errors []error) error {
+	if len(errors) == 0 {
+		return nil
+	}
+	if len(errors) == 1 {
+		return errors[0]
+	}
+
+	allNoMatchErrors := true
+	for _, err := range errors {
+		allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err)
+	}
+	if allNoMatchErrors {
+		return errors[0]
+	}
+
+	return utilerrors.NewAggregate(errors)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
new file mode 100644
index 0000000..50468b5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
@@ -0,0 +1,264 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+	// isListCache maintains a cache of types that are checked for lists
+	// which is used by IsListType.
+	// TODO: remove and replace with an interface check
+	isListCache = struct {
+		lock   sync.RWMutex
+		byType map[reflect.Type]bool
+	}{
+		byType: make(map[reflect.Type]bool, 1024),
+	}
+)
+
+// IsListType returns true if the provided Object has a slice called Items.
+// TODO: Replace the code in this check with an interface comparison by
+//   creating and enforcing that lists implement a list accessor.
+func IsListType(obj runtime.Object) bool {
+	switch t := obj.(type) {
+	case runtime.Unstructured:
+		return t.IsList()
+	}
+	t := reflect.TypeOf(obj)
+
+	isListCache.lock.RLock()
+	ok, exists := isListCache.byType[t]
+	isListCache.lock.RUnlock()
+
+	if !exists {
+		_, err := getItemsPtr(obj)
+		ok = err == nil
+
+		// cache only the first 1024 types
+		isListCache.lock.Lock()
+		if len(isListCache.byType) < 1024 {
+			isListCache.byType[t] = ok
+		}
+		isListCache.lock.Unlock()
+	}
+
+	return ok
+}
+
+var (
+	errExpectFieldItems = errors.New("no Items field in this object")
+	errExpectSliceItems = errors.New("Items field must be a slice of objects")
+)
+
+// GetItemsPtr returns a pointer to the list object's Items member.
+// If 'list' doesn't have an Items member, it's not really a list type
+// and an error will be returned.
+// This function will either return a pointer to a slice, or an error, but not both.
+// TODO: this will be replaced with an interface in the future
+func GetItemsPtr(list runtime.Object) (interface{}, error) {
+	obj, err := getItemsPtr(list)
+	if err != nil {
+		return nil, fmt.Errorf("%T is not a list: %v", list, err)
+	}
+	return obj, nil
+}
+
+// getItemsPtr returns a pointer to the list object's Items member or an error.
+func getItemsPtr(list runtime.Object) (interface{}, error) {
+	v, err := conversion.EnforcePtr(list)
+	if err != nil {
+		return nil, err
+	}
+
+	items := v.FieldByName("Items")
+	if !items.IsValid() {
+		return nil, errExpectFieldItems
+	}
+	switch items.Kind() {
+	case reflect.Interface, reflect.Ptr:
+		target := reflect.TypeOf(items.Interface()).Elem()
+		if target.Kind() != reflect.Slice {
+			return nil, errExpectSliceItems
+		}
+		return items.Interface(), nil
+	case reflect.Slice:
+		return items.Addr().Interface(), nil
+	default:
+		return nil, errExpectSliceItems
+	}
+}
+
+// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates
+// the loop.
+func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error {
+	if unstructured, ok := obj.(runtime.Unstructured); ok {
+		return unstructured.EachListItem(fn)
+	}
+	// TODO: Change to an interface call?
+	itemsPtr, err := GetItemsPtr(obj)
+	if err != nil {
+		return err
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return err
+	}
+	len := items.Len()
+	if len == 0 {
+		return nil
+	}
+	takeAddr := false
+	if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface {
+		if !items.Index(0).CanAddr() {
+			return fmt.Errorf("unable to take address of items in %T for EachListItem", obj)
+		}
+		takeAddr = true
+	}
+
+	for i := 0; i < len; i++ {
+		raw := items.Index(i)
+		if takeAddr {
+			raw = raw.Addr()
+		}
+		switch item := raw.Interface().(type) {
+		case *runtime.RawExtension:
+			if err := fn(item.Object); err != nil {
+				return err
+			}
+		case runtime.Object:
+			if err := fn(item); err != nil {
+				return err
+			}
+		default:
+			obj, ok := item.(runtime.Object)
+			if !ok {
+				return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+			}
+			if err := fn(obj); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
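+
+// exampleCountListItems is an illustrative sketch (not part of the upstream
+// apimachinery API): it shows how EachListItem visits every element of an
+// arbitrary API list object (for example a *v1.PodList) without the caller
+// doing any reflection itself.
+func exampleCountListItems(list runtime.Object) (int, error) {
+	count := 0
+	err := EachListItem(list, func(item runtime.Object) error {
+		count++
+		fmt.Printf("item %d is a %T\n", count, item)
+		return nil
+	})
+	return count, err
+}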
+
+// ExtractList returns obj's Items element as an array of runtime.Objects.
+// Returns an error if obj is not a List type (does not have an Items member).
+func ExtractList(obj runtime.Object) ([]runtime.Object, error) {
+	itemsPtr, err := GetItemsPtr(obj)
+	if err != nil {
+		return nil, err
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return nil, err
+	}
+	list := make([]runtime.Object, items.Len())
+	for i := range list {
+		raw := items.Index(i)
+		switch item := raw.Interface().(type) {
+		case runtime.RawExtension:
+			switch {
+			case item.Object != nil:
+				list[i] = item.Object
+			case item.Raw != nil:
+				// TODO: Set ContentEncoding and ContentType correctly.
+				list[i] = &runtime.Unknown{Raw: item.Raw}
+			default:
+				list[i] = nil
+			}
+		case runtime.Object:
+			list[i] = item
+		default:
+			var found bool
+			if list[i], found = raw.Addr().Interface().(runtime.Object); !found {
+				return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind())
+			}
+		}
+	}
+	return list, nil
+}
+
+// objectSliceType is the type of a slice of Objects
+var objectSliceType = reflect.TypeOf([]runtime.Object{})
+
+// LenList returns the length of this list or 0 if it is not a list.
+func LenList(list runtime.Object) int {
+	itemsPtr, err := GetItemsPtr(list)
+	if err != nil {
+		return 0
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return 0
+	}
+	return items.Len()
+}
+
+// SetList sets the given list object's Items member to have the elements given
+// in objects.
+// Returns an error if list is not a List type (does not have an Items member),
+// or if any of the objects are not of the right type.
+func SetList(list runtime.Object, objects []runtime.Object) error {
+	itemsPtr, err := GetItemsPtr(list)
+	if err != nil {
+		return err
+	}
+	items, err := conversion.EnforcePtr(itemsPtr)
+	if err != nil {
+		return err
+	}
+	if items.Type() == objectSliceType {
+		items.Set(reflect.ValueOf(objects))
+		return nil
+	}
+	slice := reflect.MakeSlice(items.Type(), len(objects), len(objects))
+	for i := range objects {
+		dest := slice.Index(i)
+		if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) {
+			dest = dest.FieldByName("Object")
+		}
+
+		// check to see if you're directly assignable
+		if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) {
+			dest.Set(reflect.ValueOf(objects[i]))
+			continue
+		}
+
+		src, err := conversion.EnforcePtr(objects[i])
+		if err != nil {
+			return err
+		}
+		if src.Type().AssignableTo(dest.Type()) {
+			dest.Set(src)
+		} else if src.Type().ConvertibleTo(dest.Type()) {
+			dest.Set(src.Convert(dest.Type()))
+		} else {
+			return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type())
+		}
+	}
+	items.Set(slice)
+	return nil
+}
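+
+// exampleFilterList is an illustrative sketch (not part of the upstream
+// apimachinery API) of the typical ExtractList/SetList round trip: pull the
+// items out of a list object, filter them with a caller-supplied predicate,
+// and write the survivors back into the same list.
+func exampleFilterList(list runtime.Object, keep func(runtime.Object) bool) error {
+	items, err := ExtractList(list)
+	if err != nil {
+		return err
+	}
+	kept := make([]runtime.Object, 0, len(items))
+	for _, item := range items {
+		if keep(item) {
+			kept = append(kept, item)
+		}
+	}
+	return SetList(list, kept)
+}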
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
new file mode 100644
index 0000000..42eac3a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+type ListMetaAccessor interface {
+	GetListMeta() List
+}
+
+// List lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+type List metav1.ListInterface
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+type Type metav1.Type
+
+// MetadataAccessor lets you work with object and list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+//
+// MetadataAccessor exposes Interface in a way that can be used with multiple objects.
+type MetadataAccessor interface {
+	APIVersion(obj runtime.Object) (string, error)
+	SetAPIVersion(obj runtime.Object, version string) error
+
+	Kind(obj runtime.Object) (string, error)
+	SetKind(obj runtime.Object, kind string) error
+
+	Namespace(obj runtime.Object) (string, error)
+	SetNamespace(obj runtime.Object, namespace string) error
+
+	Name(obj runtime.Object) (string, error)
+	SetName(obj runtime.Object, name string) error
+
+	GenerateName(obj runtime.Object) (string, error)
+	SetGenerateName(obj runtime.Object, name string) error
+
+	UID(obj runtime.Object) (types.UID, error)
+	SetUID(obj runtime.Object, uid types.UID) error
+
+	SelfLink(obj runtime.Object) (string, error)
+	SetSelfLink(obj runtime.Object, selfLink string) error
+
+	Labels(obj runtime.Object) (map[string]string, error)
+	SetLabels(obj runtime.Object, labels map[string]string) error
+
+	Annotations(obj runtime.Object) (map[string]string, error)
+	SetAnnotations(obj runtime.Object, annotations map[string]string) error
+
+	Continue(obj runtime.Object) (string, error)
+	SetContinue(obj runtime.Object, c string) error
+
+	runtime.ResourceVersioner
+}
+
+type RESTScopeName string
+
+const (
+	RESTScopeNameNamespace RESTScopeName = "namespace"
+	RESTScopeNameRoot      RESTScopeName = "root"
+)
+
+// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy
+type RESTScope interface {
+	// Name of the scope
+	Name() RESTScopeName
+}
+
+// RESTMapping contains the information needed to deal with objects of a specific
+// resource and kind in a RESTful manner.
+type RESTMapping struct {
+	// Resource is the GroupVersionResource (location) for this endpoint
+	Resource schema.GroupVersionResource
+
+	// GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint
+	GroupVersionKind schema.GroupVersionKind
+
+	// Scope contains the information needed to deal with REST Resources that are in a resource hierarchy
+	Scope RESTScope
+}
+
+// RESTMapper allows clients to map resources to kind, and map kind and version
+// to interfaces for manipulating those objects. It is primarily intended for
+// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md.
+//
+// The Kubernetes API provides versioned resources and object kinds which are scoped
+// to API groups. In other words, kinds and resources should not be assumed to be
+// unique across groups.
+//
+// TODO: split into sub-interfaces
+type RESTMapper interface {
+	// KindFor takes a partial resource and returns the single match.  Returns an error if there are multiple matches
+	KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error)
+
+	// KindsFor takes a partial resource and returns the list of potential kinds in priority order
+	KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error)
+
+	// ResourceFor takes a partial resource and returns the single match.  Returns an error if there are multiple matches
+	ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error)
+
+	// ResourcesFor takes a partial resource and returns the list of potential resources in priority order
+	ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error)
+
+	// RESTMapping identifies a preferred resource mapping for the provided group kind.
+	RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error)
+	// RESTMappings returns all resource mappings for the provided group kind if no
+	// version search is provided. Otherwise identifies a preferred resource mapping for
+	// the provided version(s).
+	RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error)
+
+	ResourceSingularizer(resource string) (singular string, err error)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
new file mode 100644
index 0000000..431a0a6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// lazyObject defers loading the mapper until it is first needed.
+type lazyObject struct {
+	loader func() (RESTMapper, error)
+
+	lock   sync.Mutex
+	loaded bool
+	err    error
+	mapper RESTMapper
+}
+
+// NewLazyRESTMapperLoader handles unrecoverable errors when creating a RESTMapper by
+// returning those initialization errors when the interface methods are invoked. This defers the
+// initialization and any server calls until a client actually needs to perform the action.
+func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper {
+	obj := &lazyObject{loader: fn}
+	return obj
+}
+
+// init lazily loads the mapper, returning an error if initialization has failed.
+func (o *lazyObject) init() error {
+	o.lock.Lock()
+	defer o.lock.Unlock()
+	if o.loaded {
+		return o.err
+	}
+	o.mapper, o.err = o.loader()
+	o.loaded = true
+	return o.err
+}
+
+var _ RESTMapper = &lazyObject{}
+
+func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	if err := o.init(); err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	return o.mapper.KindFor(resource)
+}
+
+func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	if err := o.init(); err != nil {
+		return []schema.GroupVersionKind{}, err
+	}
+	return o.mapper.KindsFor(resource)
+}
+
+func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	if err := o.init(); err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	return o.mapper.ResourceFor(input)
+}
+
+func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	if err := o.init(); err != nil {
+		return []schema.GroupVersionResource{}, err
+	}
+	return o.mapper.ResourcesFor(input)
+}
+
+func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	if err := o.init(); err != nil {
+		return nil, err
+	}
+	return o.mapper.RESTMapping(gk, versions...)
+}
+
+func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	if err := o.init(); err != nil {
+		return nil, err
+	}
+	return o.mapper.RESTMappings(gk, versions...)
+}
+
+func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) {
+	if err := o.init(); err != nil {
+		return "", err
+	}
+	return o.mapper.ResourceSingularizer(resource)
+}
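+
+// exampleLazyMapper is an illustrative sketch (not part of the upstream
+// apimachinery API): the loader passed to NewLazyRESTMapperLoader is not
+// invoked until the first mapping request, so expensive discovery work (and
+// any error it produces) is deferred to the point of use.
+func exampleLazyMapper(build func() (RESTMapper, error)) (schema.GroupVersionKind, error) {
+	mapper := NewLazyRESTMapperLoader(build) // nothing has been loaded yet
+	// The first call runs build(); a build error is returned from every call.
+	return mapper.KindFor(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"})
+}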
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
new file mode 100644
index 0000000..9ca34c9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
@@ -0,0 +1,648 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+	"reflect"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+)
+
+// errNotList is returned when an object implements the Object style interfaces but not the List style
+// interfaces.
+var errNotList = fmt.Errorf("object does not implement the List interfaces")
+
+var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink")
+
+// CommonAccessor returns a Common interface for the provided object or an error
+// if the object does not provide the common metadata accessors.
+func CommonAccessor(obj interface{}) (metav1.Common, error) {
+	switch t := obj.(type) {
+	case List:
+		return t, nil
+	case metav1.ListInterface:
+		return t, nil
+	case ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotCommon
+	case metav1.ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotCommon
+	case metav1.Object:
+		return t, nil
+	case metav1.ObjectMetaAccessor:
+		if m := t.GetObjectMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotCommon
+	default:
+		return nil, errNotCommon
+	}
+}
+
+// ListAccessor returns a List interface for the provided object or an error if the object does
+// not provide List.
+// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an
+// object *is* a List.
+func ListAccessor(obj interface{}) (List, error) {
+	switch t := obj.(type) {
+	case List:
+		return t, nil
+	case metav1.ListInterface:
+		return t, nil
+	case ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotList
+	case metav1.ListMetaAccessor:
+		if m := t.GetListMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotList
+	default:
+		return nil, errNotList
+	}
+}
+
+// errNotObject is returned when an object implements the List style interfaces but not the Object style
+// interfaces.
+var errNotObject = fmt.Errorf("object does not implement the Object interfaces")
+
+// Accessor takes an arbitrary object pointer and returns meta.Interface.
+// obj must be a pointer to an API type. An error is returned if the minimum
+// required fields are missing. Fields that are not required return the default
+// value and are a no-op if set.
+func Accessor(obj interface{}) (metav1.Object, error) {
+	switch t := obj.(type) {
+	case metav1.Object:
+		return t, nil
+	case metav1.ObjectMetaAccessor:
+		if m := t.GetObjectMeta(); m != nil {
+			return m, nil
+		}
+		return nil, errNotObject
+	default:
+		return nil, errNotObject
+	}
+}
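+
+// exampleEnsureLabel is an illustrative sketch (not part of the upstream
+// apimachinery API): Accessor lets generic code read and mutate object
+// metadata on any API type without knowing its concrete Go struct.
+func exampleEnsureLabel(obj runtime.Object, key, value string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	labels := accessor.GetLabels()
+	if labels == nil {
+		labels = map[string]string{}
+	}
+	labels[key] = value
+	accessor.SetLabels(labels)
+	return nil
+}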
+
+// AsPartialObjectMetadata takes the metav1 interface and returns a partial object.
+// TODO: consider making this solely a conversion action.
+func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata {
+	switch t := m.(type) {
+	case *metav1.ObjectMeta:
+		return &metav1.PartialObjectMetadata{ObjectMeta: *t}
+	default:
+		return &metav1.PartialObjectMetadata{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:                       m.GetName(),
+				GenerateName:               m.GetGenerateName(),
+				Namespace:                  m.GetNamespace(),
+				SelfLink:                   m.GetSelfLink(),
+				UID:                        m.GetUID(),
+				ResourceVersion:            m.GetResourceVersion(),
+				Generation:                 m.GetGeneration(),
+				CreationTimestamp:          m.GetCreationTimestamp(),
+				DeletionTimestamp:          m.GetDeletionTimestamp(),
+				DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(),
+				Labels:                     m.GetLabels(),
+				Annotations:                m.GetAnnotations(),
+				OwnerReferences:            m.GetOwnerReferences(),
+				Finalizers:                 m.GetFinalizers(),
+				ClusterName:                m.GetClusterName(),
+				ManagedFields:              m.GetManagedFields(),
+			},
+		}
+	}
+}
+
+// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion
+// and Kind of an in-memory internal object.
+// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta
+// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube
+// api conventions).
+func TypeAccessor(obj interface{}) (Type, error) {
+	if typed, ok := obj.(runtime.Object); ok {
+		return objectAccessor{typed}, nil
+	}
+	v, err := conversion.EnforcePtr(obj)
+	if err != nil {
+		return nil, err
+	}
+	t := v.Type()
+	if v.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface())
+	}
+
+	typeMeta := v.FieldByName("TypeMeta")
+	if !typeMeta.IsValid() {
+		return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t)
+	}
+	a := &genericAccessor{}
+	if err := extractFromTypeMeta(typeMeta, a); err != nil {
+		return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err)
+	}
+	return a, nil
+}
+
+type objectAccessor struct {
+	runtime.Object
+}
+
+func (obj objectAccessor) GetKind() string {
+	return obj.GetObjectKind().GroupVersionKind().Kind
+}
+
+func (obj objectAccessor) SetKind(kind string) {
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	gvk.Kind = kind
+	obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+func (obj objectAccessor) GetAPIVersion() string {
+	return obj.GetObjectKind().GroupVersionKind().GroupVersion().String()
+}
+
+func (obj objectAccessor) SetAPIVersion(version string) {
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	gv, err := schema.ParseGroupVersion(version)
+	if err != nil {
+		gv = schema.GroupVersion{Version: version}
+	}
+	gvk.Group, gvk.Version = gv.Group, gv.Version
+	obj.GetObjectKind().SetGroupVersionKind(gvk)
+}
+
+// NewAccessor returns a MetadataAccessor that can retrieve
+// or manipulate resource version on objects derived from core API
+// metadata concepts.
+func NewAccessor() MetadataAccessor {
+	return resourceAccessor{}
+}
+
+// resourceAccessor implements ResourceVersioner and SelfLinker.
+type resourceAccessor struct{}
+
+func (resourceAccessor) Kind(obj runtime.Object) (string, error) {
+	return objectAccessor{obj}.GetKind(), nil
+}
+
+func (resourceAccessor) SetKind(obj runtime.Object, kind string) error {
+	objectAccessor{obj}.SetKind(kind)
+	return nil
+}
+
+func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) {
+	return objectAccessor{obj}.GetAPIVersion(), nil
+}
+
+func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error {
+	objectAccessor{obj}.SetAPIVersion(version)
+	return nil
+}
+
+func (resourceAccessor) Namespace(obj runtime.Object) (string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetNamespace(), nil
+}
+
+func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetNamespace(namespace)
+	return nil
+}
+
+func (resourceAccessor) Name(obj runtime.Object) (string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetName(), nil
+}
+
+func (resourceAccessor) SetName(obj runtime.Object, name string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetName(name)
+	return nil
+}
+
+func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetGenerateName(), nil
+}
+
+func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetGenerateName(name)
+	return nil
+}
+
+func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetUID(), nil
+}
+
+func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetUID(uid)
+	return nil
+}
+
+func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetSelfLink(), nil
+}
+
+func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetSelfLink(selfLink)
+	return nil
+}
+
+func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	return accessor.GetLabels(), nil
+}
+
+func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetLabels(labels)
+	return nil
+}
+
+func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	return accessor.GetAnnotations(), nil
+}
+
+func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error {
+	accessor, err := Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetAnnotations(annotations)
+	return nil
+}
+
+func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetResourceVersion(), nil
+}
+
+func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error {
+	accessor, err := CommonAccessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetResourceVersion(version)
+	return nil
+}
+
+func (resourceAccessor) Continue(obj runtime.Object) (string, error) {
+	accessor, err := ListAccessor(obj)
+	if err != nil {
+		return "", err
+	}
+	return accessor.GetContinue(), nil
+}
+
+func (resourceAccessor) SetContinue(obj runtime.Object, version string) error {
+	accessor, err := ListAccessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetContinue(version)
+	return nil
+}
+
+// extractFromOwnerReference extracts v into o. v is a single element of an object's OwnerReferences field.
+func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
+	if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil {
+		return err
+	}
+	if err := runtime.Field(v, "Kind", &o.Kind); err != nil {
+		return err
+	}
+	if err := runtime.Field(v, "Name", &o.Name); err != nil {
+		return err
+	}
+	if err := runtime.Field(v, "UID", &o.UID); err != nil {
+		return err
+	}
+	var controllerPtr *bool
+	if err := runtime.Field(v, "Controller", &controllerPtr); err != nil {
+		return err
+	}
+	if controllerPtr != nil {
+		controller := *controllerPtr
+		o.Controller = &controller
+	}
+	var blockOwnerDeletionPtr *bool
+	if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil {
+		return err
+	}
+	if blockOwnerDeletionPtr != nil {
+		block := *blockOwnerDeletionPtr
+		o.BlockOwnerDeletion = &block
+	}
+	return nil
+}
+
+// setOwnerReference copies o into v. v is a single element of an object's OwnerReferences field.
+func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error {
+	if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil {
+		return err
+	}
+	if err := runtime.SetField(o.Kind, v, "Kind"); err != nil {
+		return err
+	}
+	if err := runtime.SetField(o.Name, v, "Name"); err != nil {
+		return err
+	}
+	if err := runtime.SetField(o.UID, v, "UID"); err != nil {
+		return err
+	}
+	if o.Controller != nil {
+		controller := *(o.Controller)
+		if err := runtime.SetField(&controller, v, "Controller"); err != nil {
+			return err
+		}
+	}
+	if o.BlockOwnerDeletion != nil {
+		block := *(o.BlockOwnerDeletion)
+		if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// genericAccessor holds pointers to the metadata fields of an arbitrary
+// struct and implements generic get/set accessors over them.
+type genericAccessor struct {
+	namespace         *string
+	name              *string
+	generateName      *string
+	uid               *types.UID
+	apiVersion        *string
+	kind              *string
+	resourceVersion   *string
+	selfLink          *string
+	creationTimestamp *metav1.Time
+	deletionTimestamp **metav1.Time
+	labels            *map[string]string
+	annotations       *map[string]string
+	ownerReferences   reflect.Value
+	finalizers        *[]string
+}
+
+func (a genericAccessor) GetNamespace() string {
+	if a.namespace == nil {
+		return ""
+	}
+	return *a.namespace
+}
+
+func (a genericAccessor) SetNamespace(namespace string) {
+	if a.namespace == nil {
+		return
+	}
+	*a.namespace = namespace
+}
+
+func (a genericAccessor) GetName() string {
+	if a.name == nil {
+		return ""
+	}
+	return *a.name
+}
+
+func (a genericAccessor) SetName(name string) {
+	if a.name == nil {
+		return
+	}
+	*a.name = name
+}
+
+func (a genericAccessor) GetGenerateName() string {
+	if a.generateName == nil {
+		return ""
+	}
+	return *a.generateName
+}
+
+func (a genericAccessor) SetGenerateName(generateName string) {
+	if a.generateName == nil {
+		return
+	}
+	*a.generateName = generateName
+}
+
+func (a genericAccessor) GetUID() types.UID {
+	if a.uid == nil {
+		return ""
+	}
+	return *a.uid
+}
+
+func (a genericAccessor) SetUID(uid types.UID) {
+	if a.uid == nil {
+		return
+	}
+	*a.uid = uid
+}
+
+func (a genericAccessor) GetAPIVersion() string {
+	return *a.apiVersion
+}
+
+func (a genericAccessor) SetAPIVersion(version string) {
+	*a.apiVersion = version
+}
+
+func (a genericAccessor) GetKind() string {
+	return *a.kind
+}
+
+func (a genericAccessor) SetKind(kind string) {
+	*a.kind = kind
+}
+
+func (a genericAccessor) GetResourceVersion() string {
+	return *a.resourceVersion
+}
+
+func (a genericAccessor) SetResourceVersion(version string) {
+	*a.resourceVersion = version
+}
+
+func (a genericAccessor) GetSelfLink() string {
+	return *a.selfLink
+}
+
+func (a genericAccessor) SetSelfLink(selfLink string) {
+	*a.selfLink = selfLink
+}
+
+func (a genericAccessor) GetCreationTimestamp() metav1.Time {
+	return *a.creationTimestamp
+}
+
+func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) {
+	*a.creationTimestamp = timestamp
+}
+
+func (a genericAccessor) GetDeletionTimestamp() *metav1.Time {
+	return *a.deletionTimestamp
+}
+
+func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) {
+	*a.deletionTimestamp = timestamp
+}
+
+func (a genericAccessor) GetLabels() map[string]string {
+	if a.labels == nil {
+		return nil
+	}
+	return *a.labels
+}
+
+func (a genericAccessor) SetLabels(labels map[string]string) {
+	*a.labels = labels
+}
+
+func (a genericAccessor) GetAnnotations() map[string]string {
+	if a.annotations == nil {
+		return nil
+	}
+	return *a.annotations
+}
+
+func (a genericAccessor) SetAnnotations(annotations map[string]string) {
+	if a.annotations == nil {
+		emptyAnnotations := make(map[string]string)
+		a.annotations = &emptyAnnotations
+	}
+	*a.annotations = annotations
+}
+
+func (a genericAccessor) GetFinalizers() []string {
+	if a.finalizers == nil {
+		return nil
+	}
+	return *a.finalizers
+}
+
+func (a genericAccessor) SetFinalizers(finalizers []string) {
+	*a.finalizers = finalizers
+}
+
+func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
+	var ret []metav1.OwnerReference
+	s := a.ownerReferences
+	if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+		klog.Errorf("expect %v to be a pointer to slice", s)
+		return ret
+	}
+	s = s.Elem()
+	// Set the capacity to one element greater to avoid copy if the caller later appends an element.
+	ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1)
+	for i := 0; i < s.Len(); i++ {
+		if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
+			klog.Errorf("extractFromOwnerReference failed: %v", err)
+			return ret
+		}
+	}
+	return ret
+}
+
+func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) {
+	s := a.ownerReferences
+	if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
+		klog.Errorf("expect %v to be a pointer to slice", s)
+	}
+	s = s.Elem()
+	newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
+	for i := 0; i < len(references); i++ {
+		if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
+			klog.Errorf("setOwnerReference failed: %v", err)
+			return
+		}
+	}
+	s.Set(newReferences)
+}
+
+// extractFromTypeMeta extracts pointers to version and kind fields from an object
+func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
+	if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
+		return err
+	}
+	if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
new file mode 100644
index 0000000..6b01bf1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+// MultiRESTMapper is a wrapper for multiple RESTMappers.
+type MultiRESTMapper []RESTMapper
+
+func (m MultiRESTMapper) String() string {
+	nested := []string{}
+	for _, t := range m {
+		currString := fmt.Sprintf("%v", t)
+		splitStrings := strings.Split(currString, "\n")
+		nested = append(nested, strings.Join(splitStrings, "\n\t"))
+	}
+
+	return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t"))
+}
+
+// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod)
+// This implementation supports multiple REST schemas and returns the first match.
+func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+	for _, t := range m {
+		singular, err = t.ResourceSingularizer(resource)
+		if err == nil {
+			return
+		}
+	}
+	return
+}
+
+func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	allGVRs := []schema.GroupVersionResource{}
+	for _, t := range m {
+		gvrs, err := t.ResourcesFor(resource)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		// walk the existing values to de-dup
+		for _, curr := range gvrs {
+			found := false
+			for _, existing := range allGVRs {
+				if curr == existing {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				allGVRs = append(allGVRs, curr)
+			}
+		}
+	}
+
+	if len(allGVRs) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: resource}
+	}
+
+	return allGVRs, nil
+}
+
+func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
+	allGVKs := []schema.GroupVersionKind{}
+	for _, t := range m {
+		gvks, err := t.KindsFor(resource)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		// walk the existing values to de-dup
+		for _, curr := range gvks {
+			found := false
+			for _, existing := range allGVKs {
+				if curr == existing {
+					found = true
+					break
+				}
+			}
+
+			if !found {
+				allGVKs = append(allGVKs, curr)
+			}
+		}
+	}
+
+	if len(allGVKs) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: resource}
+	}
+
+	return allGVKs, nil
+}
+
+func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	resources, err := m.ResourcesFor(resource)
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	if len(resources) == 1 {
+		return resources[0], nil
+	}
+
+	return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	kinds, err := m.KindsFor(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	if len(kinds) == 1 {
+		return kinds[0], nil
+	}
+
+	return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+// RESTMapping provides the REST mapping for the resource based on the
+// kind and version. This implementation supports multiple REST schemas and
+// returns the first match.
+func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	allMappings := []*RESTMapping{}
+	errors := []error{}
+
+	for _, t := range m {
+		currMapping, err := t.RESTMapping(gk, versions...)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+
+		allMappings = append(allMappings, currMapping)
+	}
+
+	// if we got exactly one mapping, then use it even if other requests failed
+	if len(allMappings) == 1 {
+		return allMappings[0], nil
+	}
+	if len(allMappings) > 1 {
+		var kinds []schema.GroupVersionKind
+		for _, m := range allMappings {
+			kinds = append(kinds, m.GroupVersionKind)
+		}
+		return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
+	}
+	if len(errors) > 0 {
+		return nil, utilerrors.NewAggregate(errors)
+	}
+	return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+}
+
+// RESTMappings returns all possible RESTMappings for the provided group kind, or an error
+// if the type is not recognized.
+func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	var allMappings []*RESTMapping
+	var errors []error
+
+	for _, t := range m {
+		currMappings, err := t.RESTMappings(gk, versions...)
+		// ignore "no match" errors, but any other error percolates back up
+		if IsNoMatchError(err) {
+			continue
+		}
+		if err != nil {
+			errors = append(errors, err)
+			continue
+		}
+		allMappings = append(allMappings, currMappings...)
+	}
+	if len(errors) > 0 {
+		return nil, utilerrors.NewAggregate(errors)
+	}
+	if len(allMappings) == 0 {
+		return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+	}
+	return allMappings, nil
+}
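+
+// exampleCombinedMapper is an illustrative sketch (not part of the upstream
+// apimachinery API): several mappers can be combined so that, for instance,
+// built-in types and CRD-backed types are resolved through one RESTMapper,
+// with "no match" errors from one delegate falling through to the next. The
+// "widgets" resource below is a hypothetical example.
+func exampleCombinedMapper(builtin, custom RESTMapper) (schema.GroupVersionKind, error) {
+	mapper := MultiRESTMapper{builtin, custom}
+	return mapper.KindFor(schema.GroupVersionResource{Resource: "widgets"})
+}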
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
new file mode 100644
index 0000000..fa11c58
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package meta
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	AnyGroup    = "*"
+	AnyVersion  = "*"
+	AnyResource = "*"
+	AnyKind     = "*"
+)
+
+// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind
+// when multiple matches are possible
+type PriorityRESTMapper struct {
+	// Delegate is the RESTMapper to use to locate all the Kind and Resource matches
+	Delegate RESTMapper
+
+	// ResourcePriority is a list of priority patterns to apply to matching resources.
+	// The list of all matching resources is narrowed based on the patterns until only one remains.
+	// A pattern with no matches is skipped.  A pattern with more than one match uses its
+	// matches as the list to continue matching against.
+	ResourcePriority []schema.GroupVersionResource
+
+	// KindPriority is a list of priority patterns to apply to matching kinds.
+	// The list of all matching kinds is narrowed based on the patterns until only one remains.
+	// A pattern with no matches is skipped.  A pattern with more than one match uses its
+	// matches as the list to continue matching against.
+	KindPriority []schema.GroupVersionKind
+}
+
+func (m PriorityRESTMapper) String() string {
+	return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate)
+}
+
+// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource)
+	if originalErr != nil && len(originalGVRs) == 0 {
+		return schema.GroupVersionResource{}, originalErr
+	}
+	if len(originalGVRs) == 1 {
+		return originalGVRs[0], originalErr
+	}
+
+	remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...)
+	for _, pattern := range m.ResourcePriority {
+		matchedGVRs := []schema.GroupVersionResource{}
+		for _, gvr := range remainingGVRs {
+			if resourceMatches(pattern, gvr) {
+				matchedGVRs = append(matchedGVRs, gvr)
+			}
+		}
+
+		switch len(matchedGVRs) {
+		case 0:
+			// if you have no matches, nothing matched this pattern; just move to the next
+			continue
+		case 1:
+			// one match, return
+			return matchedGVRs[0], originalErr
+		default:
+			// more than one match, use the matched hits as the list moving to the next pattern.
+			// this way you can have a series of selection criteria
+			remainingGVRs = matchedGVRs
+		}
+	}
+
+	return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs}
+}
+
+// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit.
+func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource)
+	if originalErr != nil && len(originalGVKs) == 0 {
+		return schema.GroupVersionKind{}, originalErr
+	}
+	if len(originalGVKs) == 1 {
+		return originalGVKs[0], originalErr
+	}
+
+	remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...)
+	for _, pattern := range m.KindPriority {
+		matchedGVKs := []schema.GroupVersionKind{}
+		for _, gvr := range remainingGVKs {
+			if kindMatches(pattern, gvr) {
+				matchedGVKs = append(matchedGVKs, gvr)
+			}
+		}
+
+		switch len(matchedGVKs) {
+		case 0:
+			// if you have no matches, nothing matched this pattern; just move to the next
+			continue
+		case 1:
+			// one match, return
+			return matchedGVKs[0], originalErr
+		default:
+			// more than one match, use the matched hits as the list moving to the next pattern.
+			// this way you can have a series of selection criteria
+			remainingGVKs = matchedGVKs
+		}
+	}
+
+	return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs}
+}
+
+func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool {
+	if pattern.Group != AnyGroup && pattern.Group != resource.Group {
+		return false
+	}
+	if pattern.Version != AnyVersion && pattern.Version != resource.Version {
+		return false
+	}
+	if pattern.Resource != AnyResource && pattern.Resource != resource.Resource {
+		return false
+	}
+
+	return true
+}
+
+func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool {
+	if pattern.Group != AnyGroup && pattern.Group != kind.Group {
+		return false
+	}
+	if pattern.Version != AnyVersion && pattern.Version != kind.Version {
+		return false
+	}
+	if pattern.Kind != AnyKind && pattern.Kind != kind.Kind {
+		return false
+	}
+
+	return true
+}
+
+func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) {
+	mappings, originalErr := m.Delegate.RESTMappings(gk, versions...)
+	if originalErr != nil && len(mappings) == 0 {
+		return nil, originalErr
+	}
+
+	// any versions the user provides take priority
+	priorities := m.KindPriority
+	if len(versions) > 0 {
+		priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions))
+		for _, version := range versions {
+			gv := schema.GroupVersion{
+				Version: version,
+				Group:   gk.Group,
+			}
+			priorities = append(priorities, gv.WithKind(AnyKind))
+		}
+		priorities = append(priorities, m.KindPriority...)
+	}
+
+	remaining := append([]*RESTMapping{}, mappings...)
+	for _, pattern := range priorities {
+		var matching []*RESTMapping
+		for _, m := range remaining {
+			if kindMatches(pattern, m.GroupVersionKind) {
+				matching = append(matching, m)
+			}
+		}
+
+		switch len(matching) {
+		case 0:
+			// if you have no matches, nothing matched this pattern; just move to the next
+			continue
+		case 1:
+			// one match, return
+			return matching[0], originalErr
+		default:
+			// more than one match, use the matched hits as the list moving to the next pattern.
+			// this way you can have a series of selection criteria
+			remaining = matching
+		}
+	}
+	if len(remaining) == 1 {
+		return remaining[0], originalErr
+	}
+
+	var kinds []schema.GroupVersionKind
+	for _, m := range mappings {
+		kinds = append(kinds, m.GroupVersionKind)
+	}
+	return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds}
+}
+
+func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	return m.Delegate.RESTMappings(gk, versions...)
+}
+
+func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+	return m.Delegate.ResourceSingularizer(resource)
+}
+
+func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	return m.Delegate.ResourcesFor(partiallySpecifiedResource)
+}
+
+func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) {
+	return m.Delegate.KindsFor(partiallySpecifiedResource)
+}
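+
+// examplePriorityMapper is an illustrative sketch (not part of the upstream
+// apimachinery API): when the delegate reports several matches for a partially
+// specified resource or kind, the patterns below make the mapper prefer the
+// "apps" group and only then fall back to any group.
+func examplePriorityMapper(delegate RESTMapper) RESTMapper {
+	return PriorityRESTMapper{
+		Delegate: delegate,
+		ResourcePriority: []schema.GroupVersionResource{
+			{Group: "apps", Version: AnyVersion, Resource: AnyResource},
+			{Group: AnyGroup, Version: AnyVersion, Resource: AnyResource},
+		},
+		KindPriority: []schema.GroupVersionKind{
+			{Group: "apps", Version: AnyVersion, Kind: AnyKind},
+			{Group: AnyGroup, Version: AnyVersion, Kind: AnyKind},
+		},
+	}
+}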
diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
new file mode 100644
index 0000000..41b60d7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
@@ -0,0 +1,518 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: move everything in this file to pkg/api/rest
+package meta
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Implements RESTScope interface
+type restScope struct {
+	name RESTScopeName
+}
+
+func (r *restScope) Name() RESTScopeName {
+	return r.name
+}
+
+var RESTScopeNamespace = &restScope{
+	name: RESTScopeNameNamespace,
+}
+
+var RESTScopeRoot = &restScope{
+	name: RESTScopeNameRoot,
+}
+
+// DefaultRESTMapper exposes mappings between the types defined in a
+// runtime.Scheme. It assumes that all types defined in the provided scheme
+// can be mapped with the provided MetadataAccessor and Codec interfaces.
+//
+// The resource name of a Kind is defined as the lowercase,
+// English-plural version of the Kind string.
+// When converting from resource to Kind, the singular version of the
+// resource name is also accepted for convenience.
+//
+// TODO: Only accept plural for some operations for increased control?
+// (`get pod bar` vs `get pods bar`)
+type DefaultRESTMapper struct {
+	defaultGroupVersions []schema.GroupVersion
+
+	resourceToKind       map[schema.GroupVersionResource]schema.GroupVersionKind
+	kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource
+	kindToScope          map[schema.GroupVersionKind]RESTScope
+	singularToPlural     map[schema.GroupVersionResource]schema.GroupVersionResource
+	pluralToSingular     map[schema.GroupVersionResource]schema.GroupVersionResource
+}
+
+func (m *DefaultRESTMapper) String() string {
+	return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource)
+}
+
+var _ RESTMapper = &DefaultRESTMapper{}
+
+// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion
+// to a resource name and back based on the objects in a runtime.Scheme
+// and the Kubernetes API conventions. Takes a priority list of group versions
+// that is used to order matches when a resource or kind maps to more than one
+// group version.
+func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper {
+	resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind)
+	kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource)
+	kindToScope := make(map[schema.GroupVersionKind]RESTScope)
+	singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
+	pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource)
+	// TODO: verify name mappings work correctly when versions differ
+
+	return &DefaultRESTMapper{
+		resourceToKind:       resourceToKind,
+		kindToPluralResource: kindToPluralResource,
+		kindToScope:          kindToScope,
+		defaultGroupVersions: defaultGroupVersions,
+		singularToPlural:     singularToPlural,
+		pluralToSingular:     pluralToSingular,
+	}
+}
+
+func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) {
+	plural, singular := UnsafeGuessKindToResource(kind)
+	m.AddSpecific(kind, plural, singular, scope)
+}
+
+func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) {
+	m.singularToPlural[singular] = plural
+	m.pluralToSingular[plural] = singular
+
+	m.resourceToKind[singular] = kind
+	m.resourceToKind[plural] = kind
+
+	m.kindToPluralResource[kind] = plural
+	m.kindToScope[kind] = scope
+}
+
+// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular.
+// This is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should.
+// TODO eliminate this so that different callers can correctly map to resources.  This probably means updating all
+// callers to use the RESTMapper they mean.
+var unpluralizedSuffixes = []string{
+	"endpoints",
+}
+
+// UnsafeGuessKindToResource converts Kind to a resource name.
+// Broken. This method only "sort of" works when used outside of this package.  It assumes that Kinds and Resources match
+// and they aren't guaranteed to do so.
+func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) {
+	kindName := kind.Kind
+	if len(kindName) == 0 {
+		return schema.GroupVersionResource{}, schema.GroupVersionResource{}
+	}
+	singularName := strings.ToLower(kindName)
+	singular := kind.GroupVersion().WithResource(singularName)
+
+	for _, skip := range unpluralizedSuffixes {
+		if strings.HasSuffix(singularName, skip) {
+			return singular, singular
+		}
+	}
+
+	switch string(singularName[len(singularName)-1]) {
+	case "s":
+		return kind.GroupVersion().WithResource(singularName + "es"), singular
+	case "y":
+		return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular
+	}
+
+	return kind.GroupVersion().WithResource(singularName + "s"), singular
+}
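+
+// exampleGuessedResources is an illustrative sketch (not part of the upstream
+// apimachinery API) of the naive pluralization above: "Pod" maps to "pods",
+// "Ingress" to "ingresses", and "NetworkPolicy" to "networkpolicies".
+func exampleGuessedResources() []schema.GroupVersionResource {
+	kinds := []schema.GroupVersionKind{
+		{Group: "", Version: "v1", Kind: "Pod"},
+		{Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"},
+		{Group: "networking.k8s.io", Version: "v1", Kind: "NetworkPolicy"},
+	}
+	plurals := make([]schema.GroupVersionResource, 0, len(kinds))
+	for _, gvk := range kinds {
+		plural, _ := UnsafeGuessKindToResource(gvk)
+		plurals = append(plurals, plural)
+	}
+	return plurals
+}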
+
+// ResourceSingularizer implements RESTMapper
+// It converts a resource name from plural to singular (e.g., from pods to pod)
+func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) {
+	partialResource := schema.GroupVersionResource{Resource: resourceType}
+	resources, err := m.ResourcesFor(partialResource)
+	if err != nil {
+		return resourceType, err
+	}
+
+	singular := schema.GroupVersionResource{}
+	for _, curr := range resources {
+		currSingular, ok := m.pluralToSingular[curr]
+		if !ok {
+			continue
+		}
+		if singular.Empty() {
+			singular = currSingular
+			continue
+		}
+
+		if currSingular.Resource != singular.Resource {
+			return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType)
+		}
+	}
+
+	if singular.Empty() {
+		return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType)
+	}
+
+	return singular.Resource, nil
+}
+
+// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior)
+func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource {
+	resource.Resource = strings.ToLower(resource.Resource)
+	if resource.Version == runtime.APIVersionInternal {
+		resource.Version = ""
+	}
+
+	return resource
+}
+
+func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	resource := coerceResourceForMatching(input)
+
+	hasResource := len(resource.Resource) > 0
+	hasGroup := len(resource.Group) > 0
+	hasVersion := len(resource.Version) > 0
+
+	if !hasResource {
+		return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+	}
+
+	ret := []schema.GroupVersionResource{}
+	switch {
+	case hasGroup && hasVersion:
+		// fully qualified.  Find the exact match
+		for plural, singular := range m.pluralToSingular {
+			if singular == resource {
+				ret = append(ret, plural)
+				break
+			}
+			if plural == resource {
+				ret = append(ret, plural)
+				break
+			}
+		}
+
+	case hasGroup:
+		// given a group, prefer an exact match.  If you don't find one, resort to a prefix match on group
+		foundExactMatch := false
+		requestedGroupResource := resource.GroupResource()
+		for plural, singular := range m.pluralToSingular {
+			if singular.GroupResource() == requestedGroupResource {
+				foundExactMatch = true
+				ret = append(ret, plural)
+			}
+			if plural.GroupResource() == requestedGroupResource {
+				foundExactMatch = true
+				ret = append(ret, plural)
+			}
+		}
+
+		// if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
+		// storageclass.storage.k8s.io
+		if !foundExactMatch {
+			for plural, singular := range m.pluralToSingular {
+				if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) {
+					continue
+				}
+				if singular.Resource == requestedGroupResource.Resource {
+					ret = append(ret, plural)
+				}
+				if plural.Resource == requestedGroupResource.Resource {
+					ret = append(ret, plural)
+				}
+			}
+
+		}
+
+	case hasVersion:
+		for plural, singular := range m.pluralToSingular {
+			if singular.Version == resource.Version && singular.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+			if plural.Version == resource.Version && plural.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+		}
+
+	default:
+		for plural, singular := range m.pluralToSingular {
+			if singular.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+			if plural.Resource == resource.Resource {
+				ret = append(ret, plural)
+			}
+		}
+	}
+
+	if len(ret) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: resource}
+	}
+
+	sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions})
+	return ret, nil
+}
+
+func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	resources, err := m.ResourcesFor(resource)
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	if len(resources) == 1 {
+		return resources[0], nil
+	}
+
+	return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources}
+}
+
+func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	resource := coerceResourceForMatching(input)
+
+	hasResource := len(resource.Resource) > 0
+	hasGroup := len(resource.Group) > 0
+	hasVersion := len(resource.Version) > 0
+
+	if !hasResource {
+		return nil, fmt.Errorf("a resource must be present, got: %v", resource)
+	}
+
+	ret := []schema.GroupVersionKind{}
+	switch {
+	// fully qualified.  Find the exact match
+	case hasGroup && hasVersion:
+		kind, exists := m.resourceToKind[resource]
+		if exists {
+			ret = append(ret, kind)
+		}
+
+	case hasGroup:
+		foundExactMatch := false
+		requestedGroupResource := resource.GroupResource()
+		for currResource, currKind := range m.resourceToKind {
+			if currResource.GroupResource() == requestedGroupResource {
+				foundExactMatch = true
+				ret = append(ret, currKind)
+			}
+		}
+
+		// if you didn't find an exact match, match on group prefixing. This allows storageclass.storage to match
+		// storageclass.storage.k8s.io
+		if !foundExactMatch {
+			for currResource, currKind := range m.resourceToKind {
+				if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) {
+					continue
+				}
+				if currResource.Resource == requestedGroupResource.Resource {
+					ret = append(ret, currKind)
+				}
+			}
+
+		}
+
+	case hasVersion:
+		for currResource, currKind := range m.resourceToKind {
+			if currResource.Version == resource.Version && currResource.Resource == resource.Resource {
+				ret = append(ret, currKind)
+			}
+		}
+
+	default:
+		for currResource, currKind := range m.resourceToKind {
+			if currResource.Resource == resource.Resource {
+				ret = append(ret, currKind)
+			}
+		}
+	}
+
+	if len(ret) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: input}
+	}
+
+	sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions})
+	return ret, nil
+}
+
+func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	kinds, err := m.KindsFor(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	if len(kinds) == 1 {
+		return kinds[0], nil
+	}
+
+	return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds}
+}
+
+type kindByPreferredGroupVersion struct {
+	list      []schema.GroupVersionKind
+	sortOrder []schema.GroupVersion
+}
+
+func (o kindByPreferredGroupVersion) Len() int      { return len(o.list) }
+func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o kindByPreferredGroupVersion) Less(i, j int) bool {
+	lhs := o.list[i]
+	rhs := o.list[j]
+	if lhs == rhs {
+		return false
+	}
+
+	if lhs.GroupVersion() == rhs.GroupVersion() {
+		return lhs.Kind < rhs.Kind
+	}
+
+	// otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+	lhsIndex := -1
+	rhsIndex := -1
+
+	for i := range o.sortOrder {
+		if o.sortOrder[i] == lhs.GroupVersion() {
+			lhsIndex = i
+		}
+		if o.sortOrder[i] == rhs.GroupVersion() {
+			rhsIndex = i
+		}
+	}
+
+	if rhsIndex == -1 {
+		return true
+	}
+
+	return lhsIndex < rhsIndex
+}
+
+type resourceByPreferredGroupVersion struct {
+	list      []schema.GroupVersionResource
+	sortOrder []schema.GroupVersion
+}
+
+func (o resourceByPreferredGroupVersion) Len() int      { return len(o.list) }
+func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] }
+func (o resourceByPreferredGroupVersion) Less(i, j int) bool {
+	lhs := o.list[i]
+	rhs := o.list[j]
+	if lhs == rhs {
+		return false
+	}
+
+	if lhs.GroupVersion() == rhs.GroupVersion() {
+		return lhs.Resource < rhs.Resource
+	}
+
+	// otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order
+	lhsIndex := -1
+	rhsIndex := -1
+
+	for i := range o.sortOrder {
+		if o.sortOrder[i] == lhs.GroupVersion() {
+			lhsIndex = i
+		}
+		if o.sortOrder[i] == rhs.GroupVersion() {
+			rhsIndex = i
+		}
+	}
+
+	if rhsIndex == -1 {
+		return true
+	}
+
+	return lhsIndex < rhsIndex
+}
+
+// RESTMapping returns a struct representing the resource path and conversion interfaces a
+// RESTClient should use to operate on the provided group/kind in order of versions. If a version search
+// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which
+// version should be used to access the named group/kind.
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) {
+	mappings, err := m.RESTMappings(gk, versions...)
+	if err != nil {
+		return nil, err
+	}
+	if len(mappings) == 0 {
+		return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+	}
+	// since we rely on the RESTMappings method,
+	// take the first match and return it to the caller,
+	// as this was the existing behavior.
+	return mappings[0], nil
+}
+
+// RESTMappings returns the RESTMappings for the provided group kind. If a version search order
+// is not provided, the search order provided to DefaultRESTMapper will be used.
+func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) {
+	mappings := make([]*RESTMapping, 0)
+	potentialGVK := make([]schema.GroupVersionKind, 0)
+	hadVersion := false
+
+	// Pick an appropriate version
+	for _, version := range versions {
+		if len(version) == 0 || version == runtime.APIVersionInternal {
+			continue
+		}
+		currGVK := gk.WithVersion(version)
+		hadVersion = true
+		if _, ok := m.kindToPluralResource[currGVK]; ok {
+			potentialGVK = append(potentialGVK, currGVK)
+			break
+		}
+	}
+	// Use the default preferred versions
+	if !hadVersion && len(potentialGVK) == 0 {
+		for _, gv := range m.defaultGroupVersions {
+			if gv.Group != gk.Group {
+				continue
+			}
+			potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version))
+		}
+	}
+
+	if len(potentialGVK) == 0 {
+		return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions}
+	}
+
+	for _, gvk := range potentialGVK {
+		// Ensure we have a REST mapping
+		res, ok := m.kindToPluralResource[gvk]
+		if !ok {
+			continue
+		}
+
+		// Ensure we have a REST scope
+		scope, ok := m.kindToScope[gvk]
+		if !ok {
+			return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind)
+		}
+
+		mappings = append(mappings, &RESTMapping{
+			Resource:         res,
+			GroupVersionKind: gvk,
+			Scope:            scope,
+		})
+	}
+
+	if len(mappings) == 0 {
+		return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}}
+	}
+	return mappings, nil
+}
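
The mapper above resolves partial inputs in a fixed order: exact group/version match first, then the group-prefix fallback, then the preferred group-version ordering supplied at construction time. A minimal usage sketch, assuming the NewDefaultRESTMapper constructor, the Add method and the RESTScopeNamespace value that this vendored meta package also provides:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
	mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv})
	// Add registers the kind and derives its plural/singular resource names,
	// populating the lookup tables used by ResourcesFor, KindsFor and RESTMappings.
	mapper.Add(gv.WithKind("Deployment"), meta.RESTScopeNamespace)

	// Partial input (no group or version): the default group-version order
	// passed to the constructor decides which match is preferred.
	gvr, err := mapper.ResourceFor(schema.GroupVersionResource{Resource: "deployments"})
	fmt.Println(gvr, err)

	// RESTMapping returns the resource, kind and scope for a group/kind.
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	if err == nil {
		fmt.Println(mapping.Resource, mapping.Scope.Name())
	}
}
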
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
new file mode 100644
index 0000000..7ac0fe1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
@@ -0,0 +1,13 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- lavalamp
+- smarterclayton
+- wojtek-t
+- derekwaynecarr
+- mikedanese
+- saad-ali
+- janetkuo
+- xiang90
+- mbohlool
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go
new file mode 100644
index 0000000..a8866a4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go
@@ -0,0 +1,299 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math/big"
+	"strconv"
+
+	inf "gopkg.in/inf.v0"
+)
+
+// Scale is used for getting and setting the base-10 scaled value.
+// Base-2 scales are omitted for mathematical simplicity.
+// See Quantity.ScaledValue for more details.
+type Scale int32
+
+// infScale adapts a Scale value to an inf.Scale value.
+func (s Scale) infScale() inf.Scale {
+	return inf.Scale(-s) // inf.Scale is upside-down
+}
+
+const (
+	Nano  Scale = -9
+	Micro Scale = -6
+	Milli Scale = -3
+	Kilo  Scale = 3
+	Mega  Scale = 6
+	Giga  Scale = 9
+	Tera  Scale = 12
+	Peta  Scale = 15
+	Exa   Scale = 18
+)
+
+var (
+	Zero = int64Amount{}
+
+	// Used by quantity strings - treat as read only
+	zeroBytes = []byte("0")
+)
+
+// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
+// than operations on inf.Dec for values that can be represented as int64.
+// +k8s:openapi-gen=true
+type int64Amount struct {
+	value int64
+	scale Scale
+}
+
+// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0.
+func (a int64Amount) Sign() int {
+	switch {
+	case a.value == 0:
+		return 0
+	case a.value > 0:
+		return 1
+	default:
+		return -1
+	}
+}
+
+// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be
+// represented in an int64 OR would result in a loss of precision. This method is intended as
+// an optimization to avoid calling AsDec.
+func (a int64Amount) AsInt64() (int64, bool) {
+	if a.scale == 0 {
+		return a.value, true
+	}
+	if a.scale < 0 {
+		// TODO: attempt to reduce factors, although it is assumed that factors are reduced prior
+		// to the int64Amount being created.
+		return 0, false
+	}
+	return positiveScaleInt64(a.value, a.scale)
+}
+
+// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale,
+// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result
+// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger
+// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would
+// return 1, because 0.000001 is rounded up to 1.
+func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) {
+	if a.scale < scale {
+		result, _ = negativeScaleInt64(a.value, scale-a.scale)
+		return result, true
+	}
+	return positiveScaleInt64(a.value, a.scale-scale)
+}
+
+// AsDec returns an inf.Dec representation of this value.
+func (a int64Amount) AsDec() *inf.Dec {
+	var base inf.Dec
+	base.SetUnscaled(a.value)
+	base.SetScale(inf.Scale(-a.scale))
+	return &base
+}
+
+// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b.
+func (a int64Amount) Cmp(b int64Amount) int {
+	switch {
+	case a.scale == b.scale:
+		// compare only the unscaled portion
+	case a.scale > b.scale:
+		result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale)
+		if !exact {
+			return a.AsDec().Cmp(b.AsDec())
+		}
+		if result == a.value {
+			switch {
+			case remainder == 0:
+				return 0
+			case remainder > 0:
+				return -1
+			default:
+				return 1
+			}
+		}
+		b.value = result
+	default:
+		result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale)
+		if !exact {
+			return a.AsDec().Cmp(b.AsDec())
+		}
+		if result == b.value {
+			switch {
+			case remainder == 0:
+				return 0
+			case remainder > 0:
+				return 1
+			default:
+				return -1
+			}
+		}
+		a.value = result
+	}
+
+	switch {
+	case a.value == b.value:
+		return 0
+	case a.value < b.value:
+		return -1
+	default:
+		return 1
+	}
+}
+
+// Add adds two int64Amounts together, matching scales. It will return false and not mutate
+// a if overflow or underflow would result.
+func (a *int64Amount) Add(b int64Amount) bool {
+	switch {
+	case b.value == 0:
+		return true
+	case a.value == 0:
+		a.value = b.value
+		a.scale = b.scale
+		return true
+	case a.scale == b.scale:
+		c, ok := int64Add(a.value, b.value)
+		if !ok {
+			return false
+		}
+		a.value = c
+	case a.scale > b.scale:
+		c, ok := positiveScaleInt64(a.value, a.scale-b.scale)
+		if !ok {
+			return false
+		}
+		c, ok = int64Add(c, b.value)
+		if !ok {
+			return false
+		}
+		a.scale = b.scale
+		a.value = c
+	default:
+		c, ok := positiveScaleInt64(b.value, b.scale-a.scale)
+		if !ok {
+			return false
+		}
+		c, ok = int64Add(a.value, c)
+		if !ok {
+			return false
+		}
+		a.value = c
+	}
+	return true
+}
+
+// Sub removes the value of b from the current amount, or returns false if underflow would result.
+func (a *int64Amount) Sub(b int64Amount) bool {
+	return a.Add(int64Amount{value: -b.value, scale: b.scale})
+}
+
+// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
+// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
+func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) {
+	if a.scale >= scale {
+		return a, true
+	}
+	result, exact := negativeScaleInt64(a.value, scale-a.scale)
+	return int64Amount{value: result, scale: scale}, exact
+}
+
+// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
+// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
+func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+	mantissa := a.value
+	exponent = int32(a.scale)
+
+	amount, times := removeInt64Factors(mantissa, 10)
+	exponent += int32(times)
+
+	// make sure exponent is a multiple of 3
+	var ok bool
+	switch exponent % 3 {
+	case 1, -2:
+		amount, ok = int64MultiplyScale10(amount)
+		if !ok {
+			return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
+		}
+		exponent = exponent - 1
+	case 2, -1:
+		amount, ok = int64MultiplyScale100(amount)
+		if !ok {
+			return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
+		}
+		exponent = exponent - 2
+	}
+	return strconv.AppendInt(out, amount, 10), exponent
+}
+
+// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
+// return []byte("2"), 1.
+func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
+	value, ok := a.AsScaledInt64(0)
+	if !ok {
+		return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out)
+	}
+	amount, exponent := removeInt64Factors(value, 1024)
+	return strconv.AppendInt(out, amount, 10), exponent
+}
+
+// infDecAmount implements common operations over an inf.Dec that are specific to the quantity
+// representation.
+type infDecAmount struct {
+	*inf.Dec
+}
+
+// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
+// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
+func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) {
+	tmp := &inf.Dec{}
+	tmp.Round(a.Dec, scale.infScale(), inf.RoundUp)
+	return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0
+}
+
+// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
+// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
+func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+	mantissa := a.Dec.UnscaledBig()
+	exponent = int32(-a.Dec.Scale())
+	amount := big.NewInt(0).Set(mantissa)
+	// move all factors of 10 into the exponent for easy reasoning
+	amount, times := removeBigIntFactors(amount, bigTen)
+	exponent += times
+
+	// make sure exponent is a multiple of 3
+	for exponent%3 != 0 {
+		amount.Mul(amount, bigTen)
+		exponent--
+	}
+
+	return append(out, amount.String()...), exponent
+}
+
+// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
+// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
+// return []byte("2"), 1.
+func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
+	tmp := &inf.Dec{}
+	tmp.Round(a.Dec, 0, inf.RoundUp)
+	amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024)
+	return append(out, amount.String()...), exponent
+}
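
Both AsCanonicalBytes implementations normalize the exponent to a multiple of 3 so the mantissa can be paired with an SI suffix. A standalone sketch of that normalization, illustrative only and independent of the unexported int64Amount type (overflow handling omitted; the real code falls back to inf.Dec):

package main

import "fmt"

// canonical moves trailing decimal zeros of the mantissa into the exponent,
// then pads the mantissa until the exponent is a multiple of 3, mirroring the
// adjustment performed by AsCanonicalBytes above.
func canonical(mantissa int64, exponent int32) (int64, int32) {
	for mantissa != 0 && mantissa%10 == 0 {
		mantissa /= 10
		exponent++
	}
	for exponent%3 != 0 {
		mantissa *= 10
		exponent--
	}
	return mantissa, exponent
}

func main() {
	fmt.Println(canonical(110000, 0)) // 110 3   (1.1e5 renders as "110k")
	fmt.Println(canonical(1500, -3))  // 1500 -3 (1.5 renders as "1500m")
}
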
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
new file mode 100644
index 0000000..2e09f4f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
@@ -0,0 +1,89 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
+
+package resource
+
+import (
+	fmt "fmt"
+
+	math "math"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Quantity) Reset()      { *m = Quantity{} }
+func (*Quantity) ProtoMessage() {}
+func (*Quantity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_612bba87bd70906c, []int{0}
+}
+func (m *Quantity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Quantity.Unmarshal(m, b)
+}
+func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Quantity.Marshal(b, m, deterministic)
+}
+func (m *Quantity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Quantity.Merge(m, src)
+}
+func (m *Quantity) XXX_Size() int {
+	return xxx_messageInfo_Quantity.Size(m)
+}
+func (m *Quantity) XXX_DiscardUnknown() {
+	xxx_messageInfo_Quantity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantity proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c)
+}
+
+var fileDescriptor_612bba87bd70906c = []byte{
+	// 237 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30,
+	0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b,
+	0xc5, 0xc8, 0xce, 0x00, 0x23, 0x5b, 0x92, 0x1e, 0xae, 0x15, 0xd5, 0x8e, 0x2e, 0x36, 0x52, 0xb7,
+	0x8e, 0x8c, 0x1d, 0x19, 0x9b, 0xbf, 0xe9, 0xd8, 0xb1, 0x03, 0x03, 0x31, 0x3f, 0x82, 0xea, 0x36,
+	0x52, 0xb7, 0x7b, 0xef, 0xf4, 0x4e, 0x97, 0xbd, 0xd4, 0xd3, 0x56, 0x1a, 0xa7, 0xea, 0x50, 0x12,
+	0x5b, 0xf2, 0xd4, 0xaa, 0x4f, 0xb2, 0x33, 0xc7, 0xea, 0xb4, 0x28, 0x1a, 0xb3, 0x28, 0xaa, 0xb9,
+	0xb1, 0xc4, 0x4b, 0xd5, 0xd4, 0xfa, 0x20, 0x14, 0x53, 0xeb, 0x02, 0x57, 0xa4, 0x34, 0x59, 0xe2,
+	0xc2, 0xd3, 0x4c, 0x36, 0xec, 0xbc, 0x1b, 0xdf, 0x1f, 0x2b, 0x79, 0x5e, 0xc9, 0xa6, 0xd6, 0x07,
+	0x21, 0x87, 0xea, 0xf6, 0x51, 0x1b, 0x3f, 0x0f, 0xa5, 0xac, 0xdc, 0x42, 0x69, 0xa7, 0x9d, 0x4a,
+	0x71, 0x19, 0x3e, 0x12, 0x25, 0x48, 0xd3, 0xf1, 0xe8, 0xdd, 0x34, 0x1b, 0xbd, 0x86, 0xc2, 0x7a,
+	0xe3, 0x97, 0xe3, 0xeb, 0xec, 0xa2, 0xf5, 0x6c, 0xac, 0xbe, 0x11, 0x13, 0xf1, 0x70, 0xf9, 0x76,
+	0xa2, 0xa7, 0xab, 0xef, 0x4d, 0x0e, 0x5f, 0x5d, 0x0e, 0xeb, 0x2e, 0x87, 0x4d, 0x97, 0xc3, 0xea,
+	0x67, 0x02, 0xcf, 0x72, 0xdb, 0x23, 0xec, 0x7a, 0x84, 0x7d, 0x8f, 0xb0, 0x8a, 0x28, 0xb6, 0x11,
+	0xc5, 0x2e, 0xa2, 0xd8, 0x47, 0x14, 0xbf, 0x11, 0xc5, 0xfa, 0x0f, 0xe1, 0x7d, 0x34, 0x3c, 0xf6,
+	0x1f, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x08, 0x88, 0x49, 0x0e, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
new file mode 100644
index 0000000..18a6c7c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
@@ -0,0 +1,88 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.api.resource;
+
+// Package-wide variables from generator "generated".
+option go_package = "resource";
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and AsInt64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will be rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+//   a. No precision is lost
+//   b. No fractional digits will be emitted
+//   c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+//   1.5 will be serialized as "1500m"
+//   1.5Gi will be serialized as "1536Mi"
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code in the hopes that that will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen=true
+// +k8s:openapi-gen=true
+message Quantity {
+  optional string string = 1;
+}
+
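
The suffix grammar above is what ParseQuantity in the resource package (vendored below) accepts. A short sketch of how the three suffix families parse and canonicalize, using only the package's exported API; the expected outputs in the comments follow the canonicalization rules described above:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	binary := resource.MustParse("1.5Gi")  // <binarySI> suffix
	decimal := resource.MustParse("1500m") // <decimalSI> suffix
	exp := resource.MustParse("12e6")      // <decimalExponent> suffix

	// Canonical form keeps full precision and prefers the largest suffix.
	fmt.Println(binary.String())  // "1536Mi"
	fmt.Println(decimal.String()) // "1500m"
	fmt.Println(exp.String())     // "12e6"

	// Value rounds up to the nearest integer away from zero.
	fmt.Println(decimal.Value(), decimal.MilliValue()) // 2 1500
}
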
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
new file mode 100644
index 0000000..8ffcb9f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -0,0 +1,310 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math/big"
+
+	inf "gopkg.in/inf.v0"
+)
+
+const (
+	// maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64.
+	// It is also the maximum decimal digits that can be represented with an int64.
+	maxInt64Factors = 18
+)
+
+var (
+	// Commonly needed big.Int values-- treat as read only!
+	bigTen      = big.NewInt(10)
+	bigZero     = big.NewInt(0)
+	bigOne      = big.NewInt(1)
+	bigThousand = big.NewInt(1000)
+	big1024     = big.NewInt(1024)
+
+	// Commonly needed inf.Dec values-- treat as read only!
+	decZero = inf.NewDec(0, 0)
+	decOne  = inf.NewDec(1, 0)
+
+	// Largest (in magnitude) number allowed.
+	maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64
+
+	// The maximum value we can represent milli-units for.
+	// Compare with the return value of Quantity.Value() to
+	// see if it's safe to use Quantity.MilliValue().
+	MaxMilliValue = int64(((1 << 63) - 1) / 1000)
+)
+
+const mostNegative = -(mostPositive + 1)
+const mostPositive = 1<<63 - 1
+
+// int64Add returns a+b, or false if that would overflow int64.
+func int64Add(a, b int64) (int64, bool) {
+	c := a + b
+	switch {
+	case a > 0 && b > 0:
+		if c < 0 {
+			return 0, false
+		}
+	case a < 0 && b < 0:
+		if c > 0 {
+			return 0, false
+		}
+		if a == mostNegative && b == mostNegative {
+			return 0, false
+		}
+	}
+	return c, true
+}
+
+// int64Multiply returns a*b, or false if that would overflow or underflow int64.
+func int64Multiply(a, b int64) (int64, bool) {
+	if a == 0 || b == 0 || a == 1 || b == 1 {
+		return a * b, true
+	}
+	if a == mostNegative || b == mostNegative {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}
+
+// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64.
+// Use when b is known to be greater than one.
+func int64MultiplyScale(a int64, b int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * b, true
+	}
+	if a == mostNegative && b != 1 {
+		return 0, false
+	}
+	c := a * b
+	return c, c/b == a
+}
+
+// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale10(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 10, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 10
+	return c, c/10 == a
+}
+
+// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale100(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 100, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 100
+	return c, c/100 == a
+}
+
+// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than
+// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication.
+func int64MultiplyScale1000(a int64) (int64, bool) {
+	if a == 0 || a == 1 {
+		return a * 1000, true
+	}
+	if a == mostNegative {
+		return 0, false
+	}
+	c := a * 1000
+	return c, c/1000 == a
+}
+
+// positiveScaleInt64 multiplies base by 10^scale, returning false if the
+// value overflows. Passing a negative scale is undefined.
+func positiveScaleInt64(base int64, scale Scale) (int64, bool) {
+	switch scale {
+	case 0:
+		return base, true
+	case 1:
+		return int64MultiplyScale10(base)
+	case 2:
+		return int64MultiplyScale100(base)
+	case 3:
+		return int64MultiplyScale1000(base)
+	case 6:
+		return int64MultiplyScale(base, 1000000)
+	case 9:
+		return int64MultiplyScale(base, 1000000000)
+	default:
+		value := base
+		var ok bool
+		for i := Scale(0); i < scale; i++ {
+			if value, ok = int64MultiplyScale(value, 10); !ok {
+				return 0, false
+			}
+		}
+		return value, true
+	}
+}
+
+// negativeScaleInt64 reduces base by the provided scale, rounding up, until the
+// value is zero or the scale is reached. Passing a negative scale is undefined.
+// The value returned, if not exact, is rounded away from zero.
+func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
+	if scale == 0 {
+		return base, true
+	}
+
+	value := base
+	var fraction bool
+	for i := Scale(0); i < scale; i++ {
+		if !fraction && value%10 != 0 {
+			fraction = true
+		}
+		value = value / 10
+		if value == 0 {
+			if fraction {
+				if base > 0 {
+					return 1, false
+				}
+				return -1, false
+			}
+			return 0, true
+		}
+	}
+	if fraction {
+		if base > 0 {
+			value++
+		} else {
+			value--
+		}
+	}
+	return value, !fraction
+}
+
+func pow10Int64(b int64) int64 {
+	switch b {
+	case 0:
+		return 1
+	case 1:
+		return 10
+	case 2:
+		return 100
+	case 3:
+		return 1000
+	case 4:
+		return 10000
+	case 5:
+		return 100000
+	case 6:
+		return 1000000
+	case 7:
+		return 10000000
+	case 8:
+		return 100000000
+	case 9:
+		return 1000000000
+	case 10:
+		return 10000000000
+	case 11:
+		return 100000000000
+	case 12:
+		return 1000000000000
+	case 13:
+		return 10000000000000
+	case 14:
+		return 100000000000000
+	case 15:
+		return 1000000000000000
+	case 16:
+		return 10000000000000000
+	case 17:
+		return 100000000000000000
+	case 18:
+		return 1000000000000000000
+	default:
+		return 0
+	}
+}
+
+// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder, or
+// false if no such division is possible. Dividing by negative scales is undefined.
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
+	if scale == 0 {
+		return base, 0, true
+	}
+	// the max scale representable in base 10 in an int64 is 18 decimal places
+	if scale >= 18 {
+		return 0, base, false
+	}
+	divisor := pow10Int64(int64(scale))
+	return base / divisor, base % divisor, true
+}
+
+// removeInt64Factors divides in a loop; the return values have the property that
+// value == result * base ^ times
+func removeInt64Factors(value int64, base int64) (result int64, times int32) {
+	times = 0
+	result = value
+	negative := result < 0
+	if negative {
+		result = -result
+	}
+	switch base {
+	// allow the compiler to optimize the common cases
+	case 10:
+		for result >= 10 && result%10 == 0 {
+			times++
+			result = result / 10
+		}
+	// allow the compiler to optimize the common cases
+	case 1024:
+		for result >= 1024 && result%1024 == 0 {
+			times++
+			result = result / 1024
+		}
+	default:
+		for result >= base && result%base == 0 {
+			times++
+			result = result / base
+		}
+	}
+	if negative {
+		result = -result
+	}
+	return result, times
+}
+
+// removeBigIntFactors divides in a loop; the return values have the property that
+// d == result * factor ^ times
+// d may be modified in place.
+// If d == 0, then the return values will be (0, 0)
+func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
+	q := big.NewInt(0)
+	m := big.NewInt(0)
+	for d.Cmp(bigZero) != 0 {
+		q.DivMod(d, factor, m)
+		if m.Cmp(bigZero) != 0 {
+			break
+		}
+		times++
+		d, q = q, d
+	}
+	return d, times
+}
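
All of the multiply helpers above rely on the same post-hoc overflow check: multiply, divide back, and compare, after excluding the most negative int64 (whose negation overflows). A standalone illustration of the pattern, not part of the vendored file:

package main

import "fmt"

const mostNegative = -1 << 63 // math.MinInt64

// mulOK mirrors the overflow check used by int64Multiply: the product is
// valid only if dividing it back by one factor recovers the other.
func mulOK(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	if a == mostNegative || b == mostNegative {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}

func main() {
	fmt.Println(mulOK(1<<40, 1<<20)) // 2^60: fits in an int64, ok
	fmt.Println(mulOK(1<<40, 1<<30)) // 2^70: overflows, not ok
}
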
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
new file mode 100644
index 0000000..d95e03a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -0,0 +1,733 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/big"
+	"strconv"
+	"strings"
+
+	inf "gopkg.in/inf.v0"
+)
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and AsInt64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will be rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+//   a. No precision is lost
+//   b. No fractional digits will be emitted
+//   c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+//   1.5 will be serialized as "1500m"
+//   1.5Gi will be serialized as "1536Mi"
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code in the hopes that that will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen=true
+// +k8s:openapi-gen=true
+type Quantity struct {
+	// i is the quantity in int64 scaled form, if d.Dec == nil
+	i int64Amount
+	// d is the quantity in inf.Dec form if d.Dec != nil
+	d infDecAmount
+	// s is the generated value of this quantity to avoid recalculation
+	s string
+
+	// Change Format at will. See the comment for Canonicalize for
+	// more details.
+	Format
+}
+
+// CanonicalValue allows a quantity amount to be converted to a string.
+type CanonicalValue interface {
+	// AsCanonicalBytes returns a byte array representing the string representation
+	// of the value mantissa and an int32 representing its exponent in base-10. Callers may
+	// pass a byte slice to the method to avoid allocations.
+	AsCanonicalBytes(out []byte) ([]byte, int32)
+	// AsCanonicalBase1024Bytes returns a byte array representing the string representation
+	// of the value mantissa and an int32 representing its exponent in base-1024. Callers
+	// may pass a byte slice to the method to avoid allocations.
+	AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
+}
+
+// Format lists the three possible formattings of a quantity.
+type Format string
+
+const (
+	DecimalExponent = Format("DecimalExponent") // e.g., 12e6
+	BinarySI        = Format("BinarySI")        // e.g., 12Mi (12 * 2^20)
+	DecimalSI       = Format("DecimalSI")       // e.g., 12M  (12 * 10^6)
+)
+
+// MustParse turns the given string into a quantity or panics; for tests
+// or other cases where you know the string is valid.
+func MustParse(str string) Quantity {
+	q, err := ParseQuantity(str)
+	if err != nil {
+		panic(fmt.Errorf("cannot parse '%v': %v", str, err))
+	}
+	return q
+}
+
+const (
+	// splitREString is used to separate a number from its suffix; as such,
+	// this is overly permissive, but that's OK-- it will be checked later.
+	splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
+)
+
+var (
+	// Errors that could happen while parsing a string.
+	ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
+	ErrNumeric     = errors.New("unable to parse numeric part of quantity")
+	ErrSuffix      = errors.New("unable to parse quantity's suffix")
+)
+
+// parseQuantityString is a fast scanner for quantity values.
+func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
+	positive = true
+	pos := 0
+	end := len(str)
+
+	// handle leading sign
+	if pos < end {
+		switch str[0] {
+		case '-':
+			positive = false
+			pos++
+		case '+':
+			pos++
+		}
+	}
+
+	// strip leading zeros
+Zeroes:
+	for i := pos; ; i++ {
+		if i >= end {
+			num = "0"
+			value = num
+			return
+		}
+		switch str[i] {
+		case '0':
+			pos++
+		default:
+			break Zeroes
+		}
+	}
+
+	// extract the numerator
+Num:
+	for i := pos; ; i++ {
+		if i >= end {
+			num = str[pos:end]
+			value = str[0:end]
+			return
+		}
+		switch str[i] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		default:
+			num = str[pos:i]
+			pos = i
+			break Num
+		}
+	}
+
+	// if we stripped all numerator positions, always return 0
+	if len(num) == 0 {
+		num = "0"
+	}
+
+	// handle a denominator
+	if pos < end && str[pos] == '.' {
+		pos++
+	Denom:
+		for i := pos; ; i++ {
+			if i >= end {
+				denom = str[pos:end]
+				value = str[0:end]
+				return
+			}
+			switch str[i] {
+			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			default:
+				denom = str[pos:i]
+				pos = i
+				break Denom
+			}
+		}
+		// TODO: we currently allow 1.G, but we may not want to in the future.
+		// if len(denom) == 0 {
+		// 	err = ErrFormatWrong
+		// 	return
+		// }
+	}
+	value = str[0:pos]
+
+	// grab the elements of the suffix
+	suffixStart := pos
+	for i := pos; ; i++ {
+		if i >= end {
+			suffix = str[suffixStart:end]
+			return
+		}
+		if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
+			pos = i
+			break
+		}
+	}
+	if pos < end {
+		switch str[pos] {
+		case '-', '+':
+			pos++
+		}
+	}
+Suffix:
+	for i := pos; ; i++ {
+		if i >= end {
+			suffix = str[suffixStart:end]
+			return
+		}
+		switch str[i] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		default:
+			break Suffix
+		}
+	}
+	// we encountered a non decimal in the Suffix loop, but the last character
+	// was not a valid exponent
+	err = ErrFormatWrong
+	return
+}
+
+// ParseQuantity turns str into a Quantity, or returns an error.
+func ParseQuantity(str string) (Quantity, error) {
+	if len(str) == 0 {
+		return Quantity{}, ErrFormatWrong
+	}
+	if str == "0" {
+		return Quantity{Format: DecimalSI, s: str}, nil
+	}
+
+	positive, value, num, denom, suf, err := parseQuantityString(str)
+	if err != nil {
+		return Quantity{}, err
+	}
+
+	base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
+	if !ok {
+		return Quantity{}, ErrSuffix
+	}
+
+	precision := int32(0)
+	scale := int32(0)
+	mantissa := int64(1)
+	switch format {
+	case DecimalExponent, DecimalSI:
+		scale = exponent
+		precision = maxInt64Factors - int32(len(num)+len(denom))
+	case BinarySI:
+		scale = 0
+		switch {
+		case exponent >= 0 && len(denom) == 0:
+			// only handle positive binary numbers with the fast path
+			mantissa = int64(int64(mantissa) << uint64(exponent))
+			// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
+			precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
+		default:
+			precision = -1
+		}
+	}
+
+	if precision >= 0 {
+		// if we have a denominator, shift the entire value to the left by the number of places in the
+		// denominator
+		scale -= int32(len(denom))
+		if scale >= int32(Nano) {
+			shifted := num + denom
+
+			var value int64
+			value, err := strconv.ParseInt(shifted, 10, 64)
+			if err != nil {
+				return Quantity{}, ErrNumeric
+			}
+			if result, ok := int64Multiply(value, int64(mantissa)); ok {
+				if !positive {
+					result = -result
+				}
+				// if the number is in canonical form, reuse the string
+				switch format {
+				case BinarySI:
+					if exponent%10 == 0 && (value&0x07 != 0) {
+						return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
+					}
+				default:
+					if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
+						return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
+					}
+				}
+				return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
+			}
+		}
+	}
+
+	amount := new(inf.Dec)
+	if _, ok := amount.SetString(value); !ok {
+		return Quantity{}, ErrNumeric
+	}
+
+	// So that no one but us has to think about suffixes, remove it.
+	if base == 10 {
+		amount.SetScale(amount.Scale() + Scale(exponent).infScale())
+	} else if base == 2 {
+		// numericSuffix = 2 ** exponent
+		numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
+		ub := amount.UnscaledBig()
+		amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
+	}
+
+	// Cap at min/max bounds.
+	sign := amount.Sign()
+	if sign == -1 {
+		amount.Neg(amount)
+	}
+
+	// This rounds non-zero values up to the minimum representable value, under the theory that
+	// if you want some resources, you should get some resources, even if you asked for way too small
+	// of an amount.  Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
+	// the side effect of rounding values < .5n to zero.
+	if v, ok := amount.Unscaled(); v != int64(0) || !ok {
+		amount.Round(amount, Nano.infScale(), inf.RoundUp)
+	}
+
+	// The max is just a simple cap.
+	// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
+	if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
+		amount.Set(maxAllowed.Dec)
+	}
+
+	if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
+		// This avoids rounding and hopefully confusion, too.
+		format = DecimalSI
+	}
+	if sign == -1 {
+		amount.Neg(amount)
+	}
+
+	return Quantity{d: infDecAmount{amount}, Format: format}, nil
+}
+
+// DeepCopy returns a deep-copy of the Quantity value.  Note that the method
+// receiver is a value, so we can mutate it in-place and return it.
+func (q Quantity) DeepCopy() Quantity {
+	if q.d.Dec != nil {
+		tmp := &inf.Dec{}
+		q.d.Dec = tmp.Set(q.d.Dec)
+	}
+	return q
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Quantity) OpenAPISchemaFormat() string { return "" }
+
+// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
+//
+// Note about BinarySI:
+// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between
+//   -1 and +1, it will be emitted as if q.Format were DecimalSI.
+// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
+//   rounded up. (1.1i becomes 2i.)
+func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
+	if q.IsZero() {
+		return zeroBytes, nil
+	}
+
+	var rounded CanonicalValue
+	format := q.Format
+	switch format {
+	case DecimalExponent, DecimalSI:
+	case BinarySI:
+		if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
+			// This avoids rounding and hopefully confusion, too.
+			format = DecimalSI
+		} else {
+			var exact bool
+			if rounded, exact = q.AsScale(0); !exact {
+				// Don't lose precision-- show as DecimalSI
+				format = DecimalSI
+			}
+		}
+	default:
+		format = DecimalExponent
+	}
+
+	// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
+	// one of the other formats.
+	switch format {
+	case DecimalExponent, DecimalSI:
+		number, exponent := q.AsCanonicalBytes(out)
+		suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
+		return number, suffix
+	default:
+		// format must be BinarySI
+		number, exponent := rounded.AsCanonicalBase1024Bytes(out)
+		suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
+		return number, suffix
+	}
+}
+
+// AsInt64 returns a representation of the current value as an int64 if a fast conversion
+// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
+func (q *Quantity) AsInt64() (int64, bool) {
+	if q.d.Dec != nil {
+		return 0, false
+	}
+	return q.i.AsInt64()
+}
+
+// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
+func (q *Quantity) ToDec() *Quantity {
+	if q.d.Dec == nil {
+		q.d.Dec = q.i.AsDec()
+		q.i = int64Amount{}
+	}
+	return q
+}
+
+// AsDec returns the quantity as represented by a scaled inf.Dec.
+func (q *Quantity) AsDec() *inf.Dec {
+	if q.d.Dec != nil {
+		return q.d.Dec
+	}
+	q.d.Dec = q.i.AsDec()
+	q.i = int64Amount{}
+	return q.d.Dec
+}
+
+// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
+// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
+// allocation.
+func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
+	if q.d.Dec != nil {
+		return q.d.AsCanonicalBytes(out)
+	}
+	return q.i.AsCanonicalBytes(out)
+}
+
+// IsZero returns true if the quantity is equal to zero.
+func (q *Quantity) IsZero() bool {
+	if q.d.Dec != nil {
+		return q.d.Dec.Sign() == 0
+	}
+	return q.i.value == 0
+}
+
+// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
+// quantity is greater than zero.
+func (q *Quantity) Sign() int {
+	if q.d.Dec != nil {
+		return q.d.Dec.Sign()
+	}
+	return q.i.Sign()
+}
+
+// AsScale returns the current value, rounded up to the provided scale, and returns
+// false if the scale resulted in a loss of precision.
+func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
+	if q.d.Dec != nil {
+		return q.d.AsScale(scale)
+	}
+	return q.i.AsScale(scale)
+}
+
+// RoundUp updates the quantity to the provided scale, ensuring that the value is at
+// least 1. False is returned if the rounding operation resulted in a loss of precision.
+// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
+func (q *Quantity) RoundUp(scale Scale) bool {
+	if q.d.Dec != nil {
+		q.s = ""
+		d, exact := q.d.AsScale(scale)
+		q.d = d
+		return exact
+	}
+	// avoid clearing the string value if we have already calculated it
+	if q.i.scale >= scale {
+		return true
+	}
+	q.s = ""
+	i, exact := q.i.AsScale(scale)
+	q.i = i
+	return exact
+}
+
+// Add adds the provided y quantity to the current value. If the current value is zero,
+// the format of the quantity will be updated to the format of y.
+func (q *Quantity) Add(y Quantity) {
+	q.s = ""
+	if q.d.Dec == nil && y.d.Dec == nil {
+		if q.i.value == 0 {
+			q.Format = y.Format
+		}
+		if q.i.Add(y.i) {
+			return
+		}
+	} else if q.IsZero() {
+		q.Format = y.Format
+	}
+	q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
+}
+
+// Sub subtracts the provided quantity from the current value in place. If the current
+// value is zero, the format of the quantity will be updated to the format of y.
+func (q *Quantity) Sub(y Quantity) {
+	q.s = ""
+	if q.IsZero() {
+		q.Format = y.Format
+	}
+	if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
+		return
+	}
+	q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
+}
+
+// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
+// quantity is greater than y.
+func (q *Quantity) Cmp(y Quantity) int {
+	if q.d.Dec == nil && y.d.Dec == nil {
+		return q.i.Cmp(y.i)
+	}
+	return q.AsDec().Cmp(y.AsDec())
+}
+
+// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
+// quantity is greater than y.
+func (q *Quantity) CmpInt64(y int64) int {
+	if q.d.Dec != nil {
+		return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
+	}
+	return q.i.Cmp(int64Amount{value: y})
+}
+
+// Neg sets quantity to be the negative value of itself.
+func (q *Quantity) Neg() {
+	q.s = ""
+	if q.d.Dec == nil {
+		q.i.value = -q.i.value
+		return
+	}
+	q.d.Dec.Neg(q.d.Dec)
+}
+
+// Equal checks equality of two Quantities. This is useful for testing with
+// cmp.Equal.
+func (q Quantity) Equal(v Quantity) bool {
+	return q.Cmp(v) == 0
+}
+
+// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
+// of most Quantity values.
+const int64QuantityExpectedBytes = 18
+
+// String formats the Quantity as a string, caching the result if not calculated.
+// String is an expensive operation and caching this result significantly reduces the cost of
+// normal parse / marshal operations on Quantity.
+func (q *Quantity) String() string {
+	if len(q.s) == 0 {
+		result := make([]byte, 0, int64QuantityExpectedBytes)
+		number, suffix := q.CanonicalizeBytes(result)
+		number = append(number, suffix...)
+		q.s = string(number)
+	}
+	return q.s
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (q Quantity) MarshalJSON() ([]byte, error) {
+	if len(q.s) > 0 {
+		out := make([]byte, len(q.s)+2)
+		out[0], out[len(out)-1] = '"', '"'
+		copy(out[1:], q.s)
+		return out, nil
+	}
+	result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes)
+	result[0] = '"'
+	number, suffix := q.CanonicalizeBytes(result[1:1])
+	// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
+	// the source slice and returning that
+	if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
+		number = append(number, suffix...)
+		number = append(number, '"')
+		return result[:1+len(number)], nil
+	}
+	// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
+	// append
+	result = result[:1]
+	result = append(result, number...)
+	result = append(result, suffix...)
+	result = append(result, '"')
+	return result, nil
+}
+
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (q Quantity) ToUnstructured() interface{} {
+	return q.String()
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+// TODO: Remove support for leading/trailing whitespace
+func (q *Quantity) UnmarshalJSON(value []byte) error {
+	l := len(value)
+	if l == 4 && bytes.Equal(value, []byte("null")) {
+		q.d.Dec = nil
+		q.i = int64Amount{}
+		return nil
+	}
+	if l >= 2 && value[0] == '"' && value[l-1] == '"' {
+		value = value[1 : l-1]
+	}
+
+	parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
+	if err != nil {
+		return err
+	}
+
+	// This copy is safe because parsed will not be referred to again.
+	*q = parsed
+	return nil
+}
+
+// NewQuantity returns a new Quantity representing the given
+// value in the given format.
+func NewQuantity(value int64, format Format) *Quantity {
+	return &Quantity{
+		i:      int64Amount{value: value},
+		Format: format,
+	}
+}
+
+// NewMilliQuantity returns a new Quantity representing the given
+// value * 1/1000 in the given format. Note that BinarySI formatting
+// will round fractional values, and will be changed to DecimalSI for
+// values x where (-1 < x < 1) && (x != 0).
+func NewMilliQuantity(value int64, format Format) *Quantity {
+	return &Quantity{
+		i:      int64Amount{value: value, scale: -3},
+		Format: format,
+	}
+}
+
+// NewScaledQuantity returns a new Quantity representing the given
+// value * 10^scale in DecimalSI format.
+func NewScaledQuantity(value int64, scale Scale) *Quantity {
+	return &Quantity{
+		i:      int64Amount{value: value, scale: scale},
+		Format: DecimalSI,
+	}
+}
+
+// Value returns the unscaled value of q rounded up to the nearest integer away from 0.
+func (q *Quantity) Value() int64 {
+	return q.ScaledValue(0)
+}
+
+// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
+// if that's a concern, call Value() first to verify the number is small enough.
+func (q *Quantity) MilliValue() int64 {
+	return q.ScaledValue(Milli)
+}
+
+// ScaledValue returns the value of ceil(q / 10^scale).
+// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000.
+// This could overflow an int64.
+// To detect overflow, call Value() first and verify the expected magnitude.
+func (q *Quantity) ScaledValue(scale Scale) int64 {
+	if q.d.Dec == nil {
+		i, _ := q.i.AsScaledInt64(scale)
+		return i
+	}
+	dec := q.d.Dec
+	return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
+}
+
+// Set sets q's value to be value.
+func (q *Quantity) Set(value int64) {
+	q.SetScaled(value, 0)
+}
+
+// SetMilli sets q's value to be value * 1/1000.
+func (q *Quantity) SetMilli(value int64) {
+	q.SetScaled(value, Milli)
+}
+
+// SetScaled sets q's value to be value * 10^scale
+func (q *Quantity) SetScaled(value int64, scale Scale) {
+	q.s = ""
+	q.d.Dec = nil
+	q.i = int64Amount{value: value, scale: scale}
+}
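
A short usage sketch tying the exported Quantity API above together; arithmetic stays on the int64 fast path until precision forces the inf.Dec fallback, and String caches the canonical form:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Accumulate memory requests; Add adopts the format of the first non-zero addend.
	total := resource.NewQuantity(0, resource.BinarySI)
	total.Add(resource.MustParse("512Mi"))
	total.Add(resource.MustParse("1Gi"))
	fmt.Println(total.String()) // "1536Mi"

	// Compare against a limit without converting to raw integers.
	limit := resource.MustParse("2Gi")
	fmt.Println(total.Cmp(limit) < 0) // true

	// Milli-scaled values are the usual representation for CPU quantities.
	cpu := resource.MustParse("250m")
	fmt.Println(cpu.MilliValue()) // 250
}
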
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
new file mode 100644
index 0000000..f89ca16
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
@@ -0,0 +1,288 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"fmt"
+	"io"
+	"math/bits"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+var _ proto.Sizer = &Quantity{}
+
+func (m *Quantity) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(data[:size])
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+// MarshalTo is a customized version of the generated Protobuf marshaler for a struct
+// with a single string field.
+func (m *Quantity) MarshalTo(data []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(data[:size])
+}
+
+// MarshalToSizedBuffer is a customized version of the generated
+// Protobuf marshaler for a struct with a single string field.
+func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) {
+	i := len(data)
+	_ = i
+	var l int
+	_ = l
+
+	// BEGIN CUSTOM MARSHAL
+	out := m.String()
+	i -= len(out)
+	copy(data[i:], out)
+	i = encodeVarintGenerated(data, i, uint64(len(out)))
+	// END CUSTOM MARSHAL
+	i--
+	data[i] = 0xa
+
+	return len(data) - i, nil
+}
+
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return base
+}
+
+func (m *Quantity) Size() (n int) {
+	var l int
+	_ = l
+
+	// BEGIN CUSTOM SIZE
+	l = len(m.String())
+	// END CUSTOM SIZE
+
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (bits.Len64(x|1) + 6) / 7
+}
+
+// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct
+// with a single string field.
+func (m *Quantity) Unmarshal(data []byte) error {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Quantity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(data[iNdEx:postIndex])
+
+			// BEGIN CUSTOM DECODE
+			p, err := ParseQuantity(s)
+			if err != nil {
+				return err
+			}
+			*m = p
+			// END CUSTOM DECODE
+
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(data[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func skipGenerated(data []byte) (n int, err error) {
+	l := len(data)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if data[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipGenerated(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+)
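
A short, illustrative round trip through the custom Marshal/Unmarshal defined above. This is a sketch added for review context, not part of the vendored sources.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("128Mi")

	// Marshal writes field 1 (wire type 2) holding the canonical string form.
	data, err := q.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded resource.Quantity
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.String()) // "128Mi"
}
```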
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go
new file mode 100644
index 0000000..55e177b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"math"
+	"math/big"
+	"sync"
+)
+
+var (
+	// A sync pool to reduce allocation.
+	intPool  sync.Pool
+	maxInt64 = big.NewInt(math.MaxInt64)
+)
+
+func init() {
+	intPool.New = func() interface{} {
+		return &big.Int{}
+	}
+}
+
+// scaledValue scales the given unscaled value from scale to newScale and returns
+// an int64. It ALWAYS rounds up the result when scaling down. The final result might
+// overflow.
+//
+// scale and newScale represent the scale of the unscaled decimal.
+// The mathematical value of the decimal is unscaled * 10**(-scale).
+func scaledValue(unscaled *big.Int, scale, newScale int) int64 {
+	dif := scale - newScale
+	if dif == 0 {
+		return unscaled.Int64()
+	}
+
+	// Handle scale up
+	// This is an easy case, we do not need to care about rounding and overflow.
+	// If any intermediate operation causes overflow, the result will overflow.
+	if dif < 0 {
+		return unscaled.Int64() * int64(math.Pow10(-dif))
+	}
+
+	// Handle scale down
+	// We have to be careful about the intermediate operations.
+
+	// fast path when unscaled < math.MaxInt64 and 10^dif < math.MaxInt64
+	const log10MaxInt64 = 19
+	if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 {
+		divide := int64(math.Pow10(dif))
+		result := unscaled.Int64() / divide
+		mod := unscaled.Int64() % divide
+		if mod != 0 {
+			return result + 1
+		}
+		return result
+	}
+
+	// We should only convert back to int64 when getting the result.
+	divisor := intPool.Get().(*big.Int)
+	exp := intPool.Get().(*big.Int)
+	result := intPool.Get().(*big.Int)
+	defer func() {
+		intPool.Put(divisor)
+		intPool.Put(exp)
+		intPool.Put(result)
+	}()
+
+	// divisor = 10^(dif)
+	// TODO: create a lookup table if Exp costs too much.
+	divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil)
+	// reuse exp
+	remainder := exp
+
+	// result = unscaled / divisor
+	// remainder = unscaled % divisor
+	result.DivMod(unscaled, divisor, remainder)
+	if remainder.Sign() != 0 {
+		return result.Int64() + 1
+	}
+
+	return result.Int64()
+}
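
scaledValue is unexported and is only reached through Quantity.ScaledValue on the inf.Dec path; its always-round-up behaviour on scale-down is visible through the exported API. A small sketch, assuming MustParse and Value from elsewhere in this package:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Value() is ScaledValue(0): scaling down never truncates, it rounds up.
	fmt.Println(resource.MustParse("2500m").Value()) // 3
	fmt.Println(resource.MustParse("9.99").Value())  // 10
	fmt.Println(resource.MustParse("2k").Value())    // 2000 (scale-up, exact)
}
```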
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go
new file mode 100644
index 0000000..5ed7abe
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go
@@ -0,0 +1,198 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"strconv"
+)
+
+type suffix string
+
+// suffixer can interpret and construct suffixes.
+type suffixer interface {
+	interpret(suffix) (base, exponent int32, fmt Format, ok bool)
+	construct(base, exponent int32, fmt Format) (s suffix, ok bool)
+	constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool)
+}
+
+// quantitySuffixer handles suffixes for all three formats that quantity
+// can handle.
+var quantitySuffixer = newSuffixer()
+
+type bePair struct {
+	base, exponent int32
+}
+
+type listSuffixer struct {
+	suffixToBE      map[suffix]bePair
+	beToSuffix      map[bePair]suffix
+	beToSuffixBytes map[bePair][]byte
+}
+
+func (ls *listSuffixer) addSuffix(s suffix, pair bePair) {
+	if ls.suffixToBE == nil {
+		ls.suffixToBE = map[suffix]bePair{}
+	}
+	if ls.beToSuffix == nil {
+		ls.beToSuffix = map[bePair]suffix{}
+	}
+	if ls.beToSuffixBytes == nil {
+		ls.beToSuffixBytes = map[bePair][]byte{}
+	}
+	ls.suffixToBE[s] = pair
+	ls.beToSuffix[pair] = s
+	ls.beToSuffixBytes[pair] = []byte(s)
+}
+
+func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) {
+	pair, ok := ls.suffixToBE[s]
+	if !ok {
+		return 0, 0, false
+	}
+	return pair.base, pair.exponent, true
+}
+
+func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) {
+	s, ok = ls.beToSuffix[bePair{base, exponent}]
+	return
+}
+
+func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) {
+	s, ok = ls.beToSuffixBytes[bePair{base, exponent}]
+	return
+}
+
+type suffixHandler struct {
+	decSuffixes listSuffixer
+	binSuffixes listSuffixer
+}
+
+type fastLookup struct {
+	*suffixHandler
+}
+
+func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) {
+	switch s {
+	case "":
+		return 10, 0, DecimalSI, true
+	case "n":
+		return 10, -9, DecimalSI, true
+	case "u":
+		return 10, -6, DecimalSI, true
+	case "m":
+		return 10, -3, DecimalSI, true
+	case "k":
+		return 10, 3, DecimalSI, true
+	case "M":
+		return 10, 6, DecimalSI, true
+	case "G":
+		return 10, 9, DecimalSI, true
+	}
+	return l.suffixHandler.interpret(s)
+}
+
+func newSuffixer() suffixer {
+	sh := &suffixHandler{}
+
+	// IMPORTANT: if you change this section you must change fastLookup
+
+	sh.binSuffixes.addSuffix("Ki", bePair{2, 10})
+	sh.binSuffixes.addSuffix("Mi", bePair{2, 20})
+	sh.binSuffixes.addSuffix("Gi", bePair{2, 30})
+	sh.binSuffixes.addSuffix("Ti", bePair{2, 40})
+	sh.binSuffixes.addSuffix("Pi", bePair{2, 50})
+	sh.binSuffixes.addSuffix("Ei", bePair{2, 60})
+	// Don't emit an error when trying to produce
+	// a suffix for 2^0.
+	sh.decSuffixes.addSuffix("", bePair{2, 0})
+
+	sh.decSuffixes.addSuffix("n", bePair{10, -9})
+	sh.decSuffixes.addSuffix("u", bePair{10, -6})
+	sh.decSuffixes.addSuffix("m", bePair{10, -3})
+	sh.decSuffixes.addSuffix("", bePair{10, 0})
+	sh.decSuffixes.addSuffix("k", bePair{10, 3})
+	sh.decSuffixes.addSuffix("M", bePair{10, 6})
+	sh.decSuffixes.addSuffix("G", bePair{10, 9})
+	sh.decSuffixes.addSuffix("T", bePair{10, 12})
+	sh.decSuffixes.addSuffix("P", bePair{10, 15})
+	sh.decSuffixes.addSuffix("E", bePair{10, 18})
+
+	return fastLookup{sh}
+}
+
+func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) {
+	switch fmt {
+	case DecimalSI:
+		return sh.decSuffixes.construct(base, exponent)
+	case BinarySI:
+		return sh.binSuffixes.construct(base, exponent)
+	case DecimalExponent:
+		if base != 10 {
+			return "", false
+		}
+		if exponent == 0 {
+			return "", true
+		}
+		return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true
+	}
+	return "", false
+}
+
+func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) {
+	switch format {
+	case DecimalSI:
+		return sh.decSuffixes.constructBytes(base, exponent)
+	case BinarySI:
+		return sh.binSuffixes.constructBytes(base, exponent)
+	case DecimalExponent:
+		if base != 10 {
+			return nil, false
+		}
+		if exponent == 0 {
+			return nil, true
+		}
+		result := make([]byte, 8, 8)
+		result[0] = 'e'
+		number := strconv.AppendInt(result[1:1], int64(exponent), 10)
+		if &result[1] == &number[0] {
+			return result[:1+len(number)], true
+		}
+		result = append(result[:1], number...)
+		return result, true
+	}
+	return nil, false
+}
+
+func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) {
+	// Try lookup tables first
+	if b, e, ok := sh.decSuffixes.lookup(suffix); ok {
+		return b, e, DecimalSI, true
+	}
+	if b, e, ok := sh.binSuffixes.lookup(suffix); ok {
+		return b, e, BinarySI, true
+	}
+
+	if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') {
+		parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64)
+		if err != nil {
+			return 0, 0, DecimalExponent, false
+		}
+		return 10, int32(parsed), DecimalExponent, true
+	}
+
+	return 0, 0, DecimalExponent, false
+}
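
The suffix tables above are what give parsed quantities their base/exponent and remembered Format. A sketch of the observable effect; MustParse, Value and the Format constants come from quantity.go in this package.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	for _, s := range []string{"1k", "1Ki", "1e3", "1500m"} {
		q := resource.MustParse(s)
		fmt.Printf("%s -> %d (%s)\n", s, q.Value(), q.Format)
	}
	// 1k -> 1000 (DecimalSI)
	// 1Ki -> 1024 (BinarySI)
	// 1e3 -> 1000 (DecimalExponent)
	// 1500m -> 2 (DecimalSI), 1.5 rounded up
}
```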
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
new file mode 100644
index 0000000..ab47407
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
@@ -0,0 +1,27 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package resource
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Quantity) DeepCopyInto(out *Quantity) {
+	*out = in.DeepCopy()
+	return
+}
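
A tiny sketch of what the deepcopy functions buy you for Quantity: the copy owns its own internal state, so mutating it leaves the original untouched. Add and MustParse are defined elsewhere in this package.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	orig := resource.MustParse("1Gi")

	cp := orig.DeepCopy() // independent copy, including any inf.Dec state
	cp.Add(resource.MustParse("1Gi"))

	fmt.Println(orig.String(), cp.String()) // 1Gi 2Gi
}
```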
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
new file mode 100644
index 0000000..15b4c87
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -0,0 +1,31 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+- thockin
+- smarterclayton
+- wojtek-t
+- deads2k
+- brendandburns
+- caesarxuchao
+- liggitt
+- nikhiljindal
+- gmarek
+- erictune
+- davidopp
+- sttts
+- quinton-hoole
+- luxas
+- janetkuo
+- justinsb
+- ncdc
+- soltysh
+- dims
+- madhusudancs
+- hongchaodeng
+- krousey
+- mml
+- mbohlool
+- therc
+- mqliang
+- kevin-wangzefeng
+- jianhuiz
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
new file mode 100644
index 0000000..15b45ff
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// IsControlledBy checks if the object has a controllerRef set to the given owner
+func IsControlledBy(obj Object, owner Object) bool {
+	ref := GetControllerOfNoCopy(obj)
+	if ref == nil {
+		return false
+	}
+	return ref.UID == owner.GetUID()
+}
+
+// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller
+func GetControllerOf(controllee Object) *OwnerReference {
+	ref := GetControllerOfNoCopy(controllee)
+	if ref == nil {
+		return nil
+	}
+	cp := *ref
+	return &cp
+}
+
+// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
+func GetControllerOfNoCopy(controllee Object) *OwnerReference {
+	refs := controllee.GetOwnerReferences()
+	for i := range refs {
+		if refs[i].Controller != nil && *refs[i].Controller {
+			return &refs[i]
+		}
+	}
+	return nil
+}
+
+// NewControllerRef creates an OwnerReference pointing to the given owner.
+func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
+	blockOwnerDeletion := true
+	isController := true
+	return &OwnerReference{
+		APIVersion:         gvk.GroupVersion().String(),
+		Kind:               gvk.Kind,
+		Name:               owner.GetName(),
+		UID:                owner.GetUID(),
+		BlockOwnerDeletion: &blockOwnerDeletion,
+		Controller:         &isController,
+	}
+}
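
A hypothetical end-to-end use of NewControllerRef and IsControlledBy. The owner and child here are bare ObjectMeta values purely for illustration; in practice they are real API objects implementing the metav1.Object interface.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	owner := &metav1.ObjectMeta{Name: "parent", UID: "1234"}

	ref := metav1.NewControllerRef(owner, schema.GroupVersionKind{
		Group: "apps", Version: "v1", Kind: "Deployment",
	})

	child := &metav1.ObjectMeta{
		Name:            "child",
		OwnerReferences: []metav1.OwnerReference{*ref},
	}

	fmt.Println(metav1.IsControlledBy(child, owner)) // true
}
```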
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
new file mode 100644
index 0000000..8eaebb8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
@@ -0,0 +1,355 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = float64(**in)
+	return nil
+}
+
+func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error {
+	temp := float64(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = int32(**in)
+	return nil
+}
+
+func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error {
+	temp := int32(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_int64_To_int64(in **int64, out *int64, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = int64(**in)
+	return nil
+}
+
+func Convert_int64_To_Pointer_int64(in *int64, out **int64, s conversion.Scope) error {
+	temp := int64(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error {
+	if *in == nil {
+		*out = 0
+		return nil
+	}
+	*out = int(**in)
+	return nil
+}
+
+func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error {
+	temp := int64(*in)
+	*out = &temp
+	return nil
+}
+
+func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error {
+	if *in == nil {
+		*out = ""
+		return nil
+	}
+	*out = **in
+	return nil
+}
+
+func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error {
+	if in == nil {
+		stringVar := ""
+		*out = &stringVar
+		return nil
+	}
+	*out = in
+	return nil
+}
+
+func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error {
+	if *in == nil {
+		*out = false
+		return nil
+	}
+	*out = **in
+	return nil
+}
+
+func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error {
+	if in == nil {
+		boolVar := false
+		*out = &boolVar
+		return nil
+	}
+	*out = in
+	return nil
+}
+
+// +k8s:conversion-fn=drop
+func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error {
+	// These values are explicitly not copied
+	//out.APIVersion = in.APIVersion
+	//out.Kind = in.Kind
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s conversion.Scope) error {
+	if *in == nil {
+		*out = intstr.IntOrString{} // zero value
+		return nil
+	}
+	*out = **in // copy
+	return nil
+}
+
+func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error {
+	temp := *in // copy
+	*out = &temp
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error {
+	// Cannot deep copy these, because time.Time has unexported fields.
+	*out = *in
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_v1_MicroTime_To_v1_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error {
+	// Cannot deep copy these, because time.Time has unexported fields.
+	*out = *in
+	return nil
+}
+
+func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error {
+	if *in == nil {
+		*out = Duration{} // zero duration
+		return nil
+	}
+	*out = **in // copy
+	return nil
+}
+
+func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error {
+	temp := *in // copy
+	*out = &temp
+	return nil
+}
+
+// Convert_Slice_string_To_v1_Time allows converting a URL query parameter value
+func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error {
+	str := ""
+	if len(*in) > 0 {
+		str = (*in)[0]
+	}
+	return out.UnmarshalQueryParameter(str)
+}
+
+func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error {
+	if in == nil {
+		return nil
+	}
+	str := ""
+	if len(*in) > 0 {
+		str = (*in)[0]
+	}
+	temp := Time{}
+	if err := temp.UnmarshalQueryParameter(str); err != nil {
+		return err
+	}
+	*out = &temp
+	return nil
+}
+
+func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error {
+	selector, err := labels.Parse(*in)
+	if err != nil {
+		return err
+	}
+	*out = selector
+	return nil
+}
+
+func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error {
+	selector, err := fields.ParseSelector(*in)
+	if err != nil {
+		return err
+	}
+	*out = selector
+	return nil
+}
+
+func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error {
+	if *in == nil {
+		return nil
+	}
+	*out = (*in).String()
+	return nil
+}
+
+func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error {
+	if *in == nil {
+		return nil
+	}
+	*out = (*in).String()
+	return nil
+}
+
+// +k8s:conversion-fn=copy-only
+func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
+	*out = *in
+	return nil
+}
+
+func Convert_Map_string_To_string_To_v1_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error {
+	if in == nil {
+		return nil
+	}
+	for labelKey, labelValue := range *in {
+		AddLabelToSelector(out, labelKey, labelValue)
+	}
+	return nil
+}
+
+func Convert_v1_LabelSelector_To_Map_string_To_string(in *LabelSelector, out *map[string]string, s conversion.Scope) error {
+	var err error
+	*out, err = LabelSelectorAsMap(in)
+	return err
+}
+
+// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or
+// a single query parameter with a comma delimited value to multiple int32.
+// This is used for port forwarding which needs the ports as int32.
+func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error {
+	for _, s := range *in {
+		for _, v := range strings.Split(s, ",") {
+			x, err := strconv.ParseUint(v, 10, 16)
+			if err != nil {
+				return fmt.Errorf("cannot convert to []int32: %v", err)
+			}
+			*out = append(*out, int32(x))
+		}
+	}
+	return nil
+}
+
+// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy
+func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error {
+	var str string
+	if len(*in) > 0 {
+		str = (*in)[0]
+	} else {
+		str = ""
+	}
+	temp := DeletionPropagation(str)
+	*out = &temp
+	return nil
+}
+
+// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value
+func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
+	if len(*in) > 0 {
+		*out = IncludeObjectPolicy((*in)[0])
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions.
+func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error {
+	if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil {
+		return err
+	}
+
+	uid := types.UID("")
+	if values, ok := (*in)["uid"]; ok && len(values) > 0 {
+		uid = types.UID(values[0])
+	}
+
+	resourceVersion := ""
+	if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 {
+		resourceVersion = values[0]
+	}
+
+	if len(uid) > 0 || len(resourceVersion) > 0 {
+		if out.Preconditions == nil {
+			out.Preconditions = &Preconditions{}
+		}
+		if len(uid) > 0 {
+			out.Preconditions.UID = &uid
+		}
+		if len(resourceVersion) > 0 {
+			out.Preconditions.ResourceVersion = &resourceVersion
+		}
+	}
+	return nil
+}
+
+// Convert_Slice_string_To_v1_ResourceVersionMatch allows converting a URL query parameter to ResourceVersionMatch
+func Convert_Slice_string_To_v1_ResourceVersionMatch(in *[]string, out *ResourceVersionMatch, s conversion.Scope) error {
+	if len(*in) > 0 {
+		*out = ResourceVersionMatch((*in)[0])
+	}
+	return nil
+}
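
These converters are normally registered with the conversion machinery, but most of them never touch the Scope argument, so a direct call illustrates the behaviour. A sketch; passing a nil Scope is an assumption of this example, not a documented contract.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := []string{"80,443", "8080"}
	var out []int32

	// Comma-delimited and repeated query parameters are flattened into one []int32.
	if err := metav1.Convert_Slice_string_To_Slice_int32(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out) // [80 443 8080]
}
```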
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
new file mode 100644
index 0000000..8751d05
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func (in *TableRow) DeepCopy() *TableRow {
+	if in == nil {
+		return nil
+	}
+
+	out := new(TableRow)
+
+	if in.Cells != nil {
+		out.Cells = make([]interface{}, len(in.Cells))
+		for i := range in.Cells {
+			out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i])
+		}
+	}
+
+	if in.Conditions != nil {
+		out.Conditions = make([]TableRowCondition, len(in.Conditions))
+		for i := range in.Conditions {
+			in.Conditions[i].DeepCopyInto(&out.Conditions[i])
+		}
+	}
+
+	in.Object.DeepCopyInto(&out.Object)
+	return out
+}
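
A small sketch of the hand-written TableRow.DeepCopy above: cells are copied via runtime.DeepCopyJSONValue, so mutating the copy does not leak back into the original. The cell values used here are arbitrary.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	row := metav1.TableRow{Cells: []interface{}{"pod-a", int64(3)}}

	cp := row.DeepCopy()
	cp.Cells[0] = "pod-b"

	fmt.Println(row.Cells[0], cp.Cells[0]) // pod-a pod-b
}
```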
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
new file mode 100644
index 0000000..7736753
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=false
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=meta.k8s.io
+
+package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
new file mode 100644
index 0000000..a22b078
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+)
+
+// Duration is a wrapper around time.Duration which supports correct
+// marshaling to YAML and JSON. In particular, it marshals into strings, which
+// can be used as map keys in json.
+type Duration struct {
+	time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pd, err := time.ParseDuration(str)
+	if err != nil {
+		return err
+	}
+	d.Duration = pd
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.Duration.String())
+}
+
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (d Duration) ToUnstructured() interface{} {
+	return d.Duration.String()
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Duration) OpenAPISchemaFormat() string { return "" }
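
A short sketch of the wrapper's JSON behaviour: it marshals to and from duration strings rather than integer nanoseconds. Only the standard library and the type above are involved.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	b, _ := json.Marshal(metav1.Duration{Duration: 90 * time.Second})
	fmt.Println(string(b)) // "1m30s"

	var d metav1.Duration
	if err := json.Unmarshal([]byte(`"2h45m"`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration) // 2h45m0s
}
```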
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
new file mode 100644
index 0000000..e74a510
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -0,0 +1,11477 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+	time "time"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *APIGroup) Reset()      { *m = APIGroup{} }
+func (*APIGroup) ProtoMessage() {}
+func (*APIGroup) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{0}
+}
+func (m *APIGroup) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *APIGroup) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_APIGroup.Merge(m, src)
+}
+func (m *APIGroup) XXX_Size() int {
+	return m.Size()
+}
+func (m *APIGroup) XXX_DiscardUnknown() {
+	xxx_messageInfo_APIGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIGroup proto.InternalMessageInfo
+
+func (m *APIGroupList) Reset()      { *m = APIGroupList{} }
+func (*APIGroupList) ProtoMessage() {}
+func (*APIGroupList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{1}
+}
+func (m *APIGroupList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *APIGroupList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_APIGroupList.Merge(m, src)
+}
+func (m *APIGroupList) XXX_Size() int {
+	return m.Size()
+}
+func (m *APIGroupList) XXX_DiscardUnknown() {
+	xxx_messageInfo_APIGroupList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIGroupList proto.InternalMessageInfo
+
+func (m *APIResource) Reset()      { *m = APIResource{} }
+func (*APIResource) ProtoMessage() {}
+func (*APIResource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{2}
+}
+func (m *APIResource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *APIResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_APIResource.Merge(m, src)
+}
+func (m *APIResource) XXX_Size() int {
+	return m.Size()
+}
+func (m *APIResource) XXX_DiscardUnknown() {
+	xxx_messageInfo_APIResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIResource proto.InternalMessageInfo
+
+func (m *APIResourceList) Reset()      { *m = APIResourceList{} }
+func (*APIResourceList) ProtoMessage() {}
+func (*APIResourceList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{3}
+}
+func (m *APIResourceList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *APIResourceList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_APIResourceList.Merge(m, src)
+}
+func (m *APIResourceList) XXX_Size() int {
+	return m.Size()
+}
+func (m *APIResourceList) XXX_DiscardUnknown() {
+	xxx_messageInfo_APIResourceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIResourceList proto.InternalMessageInfo
+
+func (m *APIVersions) Reset()      { *m = APIVersions{} }
+func (*APIVersions) ProtoMessage() {}
+func (*APIVersions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{4}
+}
+func (m *APIVersions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *APIVersions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_APIVersions.Merge(m, src)
+}
+func (m *APIVersions) XXX_Size() int {
+	return m.Size()
+}
+func (m *APIVersions) XXX_DiscardUnknown() {
+	xxx_messageInfo_APIVersions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_APIVersions proto.InternalMessageInfo
+
+func (m *Condition) Reset()      { *m = Condition{} }
+func (*Condition) ProtoMessage() {}
+func (*Condition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{5}
+}
+func (m *Condition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Condition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Condition.Merge(m, src)
+}
+func (m *Condition) XXX_Size() int {
+	return m.Size()
+}
+func (m *Condition) XXX_DiscardUnknown() {
+	xxx_messageInfo_Condition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Condition proto.InternalMessageInfo
+
+func (m *CreateOptions) Reset()      { *m = CreateOptions{} }
+func (*CreateOptions) ProtoMessage() {}
+func (*CreateOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{6}
+}
+func (m *CreateOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CreateOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CreateOptions.Merge(m, src)
+}
+func (m *CreateOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *CreateOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_CreateOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CreateOptions proto.InternalMessageInfo
+
+func (m *DeleteOptions) Reset()      { *m = DeleteOptions{} }
+func (*DeleteOptions) ProtoMessage() {}
+func (*DeleteOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{7}
+}
+func (m *DeleteOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DeleteOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteOptions.Merge(m, src)
+}
+func (m *DeleteOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo
+
+func (m *Duration) Reset()      { *m = Duration{} }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{8}
+}
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(m, src)
+}
+func (m *Duration) XXX_Size() int {
+	return m.Size()
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
+func (m *ExportOptions) Reset()      { *m = ExportOptions{} }
+func (*ExportOptions) ProtoMessage() {}
+func (*ExportOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{9}
+}
+func (m *ExportOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ExportOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExportOptions.Merge(m, src)
+}
+func (m *ExportOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExportOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExportOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExportOptions proto.InternalMessageInfo
+
+func (m *FieldsV1) Reset()      { *m = FieldsV1{} }
+func (*FieldsV1) ProtoMessage() {}
+func (*FieldsV1) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{10}
+}
+func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *FieldsV1) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldsV1.Merge(m, src)
+}
+func (m *FieldsV1) XXX_Size() int {
+	return m.Size()
+}
+func (m *FieldsV1) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldsV1.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
+
+func (m *GetOptions) Reset()      { *m = GetOptions{} }
+func (*GetOptions) ProtoMessage() {}
+func (*GetOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{11}
+}
+func (m *GetOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GetOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOptions.Merge(m, src)
+}
+func (m *GetOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *GetOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOptions proto.InternalMessageInfo
+
+func (m *GroupKind) Reset()      { *m = GroupKind{} }
+func (*GroupKind) ProtoMessage() {}
+func (*GroupKind) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{12}
+}
+func (m *GroupKind) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupKind) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupKind.Merge(m, src)
+}
+func (m *GroupKind) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupKind) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupKind.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupKind proto.InternalMessageInfo
+
+func (m *GroupResource) Reset()      { *m = GroupResource{} }
+func (*GroupResource) ProtoMessage() {}
+func (*GroupResource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{13}
+}
+func (m *GroupResource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupResource.Merge(m, src)
+}
+func (m *GroupResource) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupResource) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupResource proto.InternalMessageInfo
+
+func (m *GroupVersion) Reset()      { *m = GroupVersion{} }
+func (*GroupVersion) ProtoMessage() {}
+func (*GroupVersion) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{14}
+}
+func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupVersion) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupVersion.Merge(m, src)
+}
+func (m *GroupVersion) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupVersion) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
+
+func (m *GroupVersionForDiscovery) Reset()      { *m = GroupVersionForDiscovery{} }
+func (*GroupVersionForDiscovery) ProtoMessage() {}
+func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{15}
+}
+func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src)
+}
+func (m *GroupVersionForDiscovery) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
+
+func (m *GroupVersionKind) Reset()      { *m = GroupVersionKind{} }
+func (*GroupVersionKind) ProtoMessage() {}
+func (*GroupVersionKind) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{16}
+}
+func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupVersionKind) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupVersionKind.Merge(m, src)
+}
+func (m *GroupVersionKind) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupVersionKind) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupVersionKind.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
+
+func (m *GroupVersionResource) Reset()      { *m = GroupVersionResource{} }
+func (*GroupVersionResource) ProtoMessage() {}
+func (*GroupVersionResource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{17}
+}
+func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupVersionResource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupVersionResource.Merge(m, src)
+}
+func (m *GroupVersionResource) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupVersionResource) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupVersionResource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
+
+func (m *LabelSelector) Reset()      { *m = LabelSelector{} }
+func (*LabelSelector) ProtoMessage() {}
+func (*LabelSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{18}
+}
+func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LabelSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelSelector.Merge(m, src)
+}
+func (m *LabelSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *LabelSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
+
+func (m *LabelSelectorRequirement) Reset()      { *m = LabelSelectorRequirement{} }
+func (*LabelSelectorRequirement) ProtoMessage() {}
+func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{19}
+}
+func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LabelSelectorRequirement.Merge(m, src)
+}
+func (m *LabelSelectorRequirement) XXX_Size() int {
+	return m.Size()
+}
+func (m *LabelSelectorRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
+
+func (m *List) Reset()      { *m = List{} }
+func (*List) ProtoMessage() {}
+func (*List) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{20}
+}
+func (m *List) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *List) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_List.Merge(m, src)
+}
+func (m *List) XXX_Size() int {
+	return m.Size()
+}
+func (m *List) XXX_DiscardUnknown() {
+	xxx_messageInfo_List.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_List proto.InternalMessageInfo
+
+func (m *ListMeta) Reset()      { *m = ListMeta{} }
+func (*ListMeta) ProtoMessage() {}
+func (*ListMeta) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{21}
+}
+func (m *ListMeta) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ListMeta) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListMeta.Merge(m, src)
+}
+func (m *ListMeta) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListMeta) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListMeta.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListMeta proto.InternalMessageInfo
+
+func (m *ListOptions) Reset()      { *m = ListOptions{} }
+func (*ListOptions) ProtoMessage() {}
+func (*ListOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{22}
+}
+func (m *ListOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ListOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListOptions.Merge(m, src)
+}
+func (m *ListOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListOptions proto.InternalMessageInfo
+
+func (m *ManagedFieldsEntry) Reset()      { *m = ManagedFieldsEntry{} }
+func (*ManagedFieldsEntry) ProtoMessage() {}
+func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{23}
+}
+func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ManagedFieldsEntry.Merge(m, src)
+}
+func (m *ManagedFieldsEntry) XXX_Size() int {
+	return m.Size()
+}
+func (m *ManagedFieldsEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
+
+func (m *MicroTime) Reset()      { *m = MicroTime{} }
+func (*MicroTime) ProtoMessage() {}
+func (*MicroTime) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{24}
+}
+func (m *MicroTime) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MicroTime.Unmarshal(m, b)
+}
+func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic)
+}
+func (m *MicroTime) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MicroTime.Merge(m, src)
+}
+func (m *MicroTime) XXX_Size() int {
+	return xxx_messageInfo_MicroTime.Size(m)
+}
+func (m *MicroTime) XXX_DiscardUnknown() {
+	xxx_messageInfo_MicroTime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MicroTime proto.InternalMessageInfo
+
+func (m *ObjectMeta) Reset()      { *m = ObjectMeta{} }
+func (*ObjectMeta) ProtoMessage() {}
+func (*ObjectMeta) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{25}
+}
+func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ObjectMeta) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ObjectMeta.Merge(m, src)
+}
+func (m *ObjectMeta) XXX_Size() int {
+	return m.Size()
+}
+func (m *ObjectMeta) XXX_DiscardUnknown() {
+	xxx_messageInfo_ObjectMeta.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
+
+func (m *OwnerReference) Reset()      { *m = OwnerReference{} }
+func (*OwnerReference) ProtoMessage() {}
+func (*OwnerReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{26}
+}
+func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *OwnerReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OwnerReference.Merge(m, src)
+}
+func (m *OwnerReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *OwnerReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_OwnerReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
+
+func (m *PartialObjectMetadata) Reset()      { *m = PartialObjectMetadata{} }
+func (*PartialObjectMetadata) ProtoMessage() {}
+func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{27}
+}
+func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PartialObjectMetadata.Merge(m, src)
+}
+func (m *PartialObjectMetadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *PartialObjectMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
+
+func (m *PartialObjectMetadataList) Reset()      { *m = PartialObjectMetadataList{} }
+func (*PartialObjectMetadataList) ProtoMessage() {}
+func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{28}
+}
+func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
+}
+func (m *PartialObjectMetadataList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
+
+func (m *Patch) Reset()      { *m = Patch{} }
+func (*Patch) ProtoMessage() {}
+func (*Patch) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{29}
+}
+func (m *Patch) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Patch) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Patch.Merge(m, src)
+}
+func (m *Patch) XXX_Size() int {
+	return m.Size()
+}
+func (m *Patch) XXX_DiscardUnknown() {
+	xxx_messageInfo_Patch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Patch proto.InternalMessageInfo
+
+func (m *PatchOptions) Reset()      { *m = PatchOptions{} }
+func (*PatchOptions) ProtoMessage() {}
+func (*PatchOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{30}
+}
+func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PatchOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PatchOptions.Merge(m, src)
+}
+func (m *PatchOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *PatchOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_PatchOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
+
+func (m *Preconditions) Reset()      { *m = Preconditions{} }
+func (*Preconditions) ProtoMessage() {}
+func (*Preconditions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{31}
+}
+func (m *Preconditions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Preconditions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Preconditions.Merge(m, src)
+}
+func (m *Preconditions) XXX_Size() int {
+	return m.Size()
+}
+func (m *Preconditions) XXX_DiscardUnknown() {
+	xxx_messageInfo_Preconditions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Preconditions proto.InternalMessageInfo
+
+func (m *RootPaths) Reset()      { *m = RootPaths{} }
+func (*RootPaths) ProtoMessage() {}
+func (*RootPaths) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{32}
+}
+func (m *RootPaths) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RootPaths) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RootPaths.Merge(m, src)
+}
+func (m *RootPaths) XXX_Size() int {
+	return m.Size()
+}
+func (m *RootPaths) XXX_DiscardUnknown() {
+	xxx_messageInfo_RootPaths.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RootPaths proto.InternalMessageInfo
+
+func (m *ServerAddressByClientCIDR) Reset()      { *m = ServerAddressByClientCIDR{} }
+func (*ServerAddressByClientCIDR) ProtoMessage() {}
+func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{33}
+}
+func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src)
+}
+func (m *ServerAddressByClientCIDR) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
+
+func (m *Status) Reset()      { *m = Status{} }
+func (*Status) ProtoMessage() {}
+func (*Status) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{34}
+}
+func (m *Status) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Status) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Status.Merge(m, src)
+}
+func (m *Status) XXX_Size() int {
+	return m.Size()
+}
+func (m *Status) XXX_DiscardUnknown() {
+	xxx_messageInfo_Status.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Status proto.InternalMessageInfo
+
+func (m *StatusCause) Reset()      { *m = StatusCause{} }
+func (*StatusCause) ProtoMessage() {}
+func (*StatusCause) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{35}
+}
+func (m *StatusCause) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StatusCause) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusCause.Merge(m, src)
+}
+func (m *StatusCause) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusCause) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusCause.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusCause proto.InternalMessageInfo
+
+func (m *StatusDetails) Reset()      { *m = StatusDetails{} }
+func (*StatusDetails) ProtoMessage() {}
+func (*StatusDetails) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{36}
+}
+func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StatusDetails) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusDetails.Merge(m, src)
+}
+func (m *StatusDetails) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusDetails) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusDetails.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
+
+func (m *TableOptions) Reset()      { *m = TableOptions{} }
+func (*TableOptions) ProtoMessage() {}
+func (*TableOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{37}
+}
+func (m *TableOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TableOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TableOptions.Merge(m, src)
+}
+func (m *TableOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *TableOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_TableOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TableOptions proto.InternalMessageInfo
+
+func (m *Time) Reset()      { *m = Time{} }
+func (*Time) ProtoMessage() {}
+func (*Time) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{38}
+}
+func (m *Time) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Time.Unmarshal(m, b)
+}
+func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Time.Marshal(b, m, deterministic)
+}
+func (m *Time) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Time.Merge(m, src)
+}
+func (m *Time) XXX_Size() int {
+	return xxx_messageInfo_Time.Size(m)
+}
+func (m *Time) XXX_DiscardUnknown() {
+	xxx_messageInfo_Time.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Time proto.InternalMessageInfo
+
+func (m *Timestamp) Reset()      { *m = Timestamp{} }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{39}
+}
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return m.Size()
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *TypeMeta) Reset()      { *m = TypeMeta{} }
+func (*TypeMeta) ProtoMessage() {}
+func (*TypeMeta) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{40}
+}
+func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TypeMeta) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TypeMeta.Merge(m, src)
+}
+func (m *TypeMeta) XXX_Size() int {
+	return m.Size()
+}
+func (m *TypeMeta) XXX_DiscardUnknown() {
+	xxx_messageInfo_TypeMeta.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
+
+func (m *UpdateOptions) Reset()      { *m = UpdateOptions{} }
+func (*UpdateOptions) ProtoMessage() {}
+func (*UpdateOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{41}
+}
+func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *UpdateOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateOptions.Merge(m, src)
+}
+func (m *UpdateOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
+
+func (m *Verbs) Reset()      { *m = Verbs{} }
+func (*Verbs) ProtoMessage() {}
+func (*Verbs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{42}
+}
+func (m *Verbs) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Verbs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Verbs.Merge(m, src)
+}
+func (m *Verbs) XXX_Size() int {
+	return m.Size()
+}
+func (m *Verbs) XXX_DiscardUnknown() {
+	xxx_messageInfo_Verbs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Verbs proto.InternalMessageInfo
+
+func (m *WatchEvent) Reset()      { *m = WatchEvent{} }
+func (*WatchEvent) ProtoMessage() {}
+func (*WatchEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_cf52fa777ced5367, []int{43}
+}
+func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WatchEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchEvent.Merge(m, src)
+}
+func (m *WatchEvent) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchEvent proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup")
+	proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList")
+	proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource")
+	proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList")
+	proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions")
+	proto.RegisterType((*Condition)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Condition")
+	proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
+	proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
+	proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
+	proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions")
+	proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
+	proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
+	proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
+	proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource")
+	proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion")
+	proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery")
+	proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind")
+	proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource")
+	proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry")
+	proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement")
+	proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List")
+	proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta")
+	proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions")
+	proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry")
+	proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime")
+	proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry")
+	proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference")
+	proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata")
+	proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList")
+	proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch")
+	proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions")
+	proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions")
+	proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths")
+	proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR")
+	proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status")
+	proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause")
+	proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails")
+	proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions")
+	proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time")
+	proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp")
+	proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta")
+	proto.RegisterType((*UpdateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.UpdateOptions")
+	proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs")
+	proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367)
+}
+
+var fileDescriptor_cf52fa777ced5367 = []byte{
+	// 2832 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0xcd, 0x6f, 0x23, 0x57,
+	0x3d, 0x63, 0xc7, 0x89, 0xfd, 0x73, 0x9c, 0x8f, 0xb7, 0x59, 0xf0, 0x06, 0x11, 0xa7, 0x53, 0xb4,
+	0xda, 0x42, 0xeb, 0x34, 0x4b, 0xa9, 0xb6, 0x5b, 0x5a, 0x88, 0xe3, 0x64, 0x1b, 0x9a, 0x34, 0xd1,
+	0xcb, 0xee, 0x02, 0xa5, 0x42, 0x9d, 0x78, 0x5e, 0x9c, 0x21, 0xe3, 0x19, 0xf7, 0xbd, 0x71, 0xb2,
+	0x86, 0x03, 0x3d, 0x80, 0x00, 0x09, 0xaa, 0x1e, 0x11, 0x07, 0xd4, 0x0a, 0xfe, 0x02, 0x2e, 0xf0,
+	0x07, 0x20, 0xd1, 0x63, 0x25, 0x2e, 0x95, 0x40, 0x56, 0x37, 0x1c, 0x38, 0x22, 0xae, 0xb9, 0x80,
+	0xde, 0xc7, 0xcc, 0xbc, 0xf1, 0xc7, 0x66, 0xdc, 0x2d, 0x15, 0x37, 0xcf, 0xef, 0xfb, 0xbd, 0xf7,
+	0x7b, 0xbf, 0xaf, 0x67, 0xd8, 0x3d, 0xb9, 0xc5, 0xaa, 0x8e, 0xbf, 0x7a, 0xd2, 0x39, 0x24, 0xd4,
+	0x23, 0x01, 0x61, 0xab, 0xa7, 0xc4, 0xb3, 0x7d, 0xba, 0xaa, 0x10, 0x56, 0xdb, 0x69, 0x59, 0x8d,
+	0x63, 0xc7, 0x23, 0xb4, 0xbb, 0xda, 0x3e, 0x69, 0x72, 0x00, 0x5b, 0x6d, 0x91, 0xc0, 0x5a, 0x3d,
+	0x5d, 0x5b, 0x6d, 0x12, 0x8f, 0x50, 0x2b, 0x20, 0x76, 0xb5, 0x4d, 0xfd, 0xc0, 0x47, 0x5f, 0x92,
+	0x5c, 0x55, 0x9d, 0xab, 0xda, 0x3e, 0x69, 0x72, 0x00, 0xab, 0x72, 0xae, 0xea, 0xe9, 0xda, 0xd2,
+	0x33, 0x4d, 0x27, 0x38, 0xee, 0x1c, 0x56, 0x1b, 0x7e, 0x6b, 0xb5, 0xe9, 0x37, 0xfd, 0x55, 0xc1,
+	0x7c, 0xd8, 0x39, 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x97, 0x46, 0x9a, 0x42, 0x3b, 0x5e,
+	0xe0, 0xb4, 0x48, 0xbf, 0x15, 0x4b, 0xcf, 0x5f, 0xc6, 0xc0, 0x1a, 0xc7, 0xa4, 0x65, 0xf5, 0xf3,
+	0x99, 0x7f, 0xc9, 0x42, 0x7e, 0x7d, 0x7f, 0xfb, 0x0e, 0xf5, 0x3b, 0x6d, 0xb4, 0x02, 0x93, 0x9e,
+	0xd5, 0x22, 0x65, 0x63, 0xc5, 0xb8, 0x51, 0xa8, 0xcd, 0x7c, 0xd0, 0xab, 0x4c, 0x9c, 0xf7, 0x2a,
+	0x93, 0xaf, 0x59, 0x2d, 0x82, 0x05, 0x06, 0xb9, 0x90, 0x3f, 0x25, 0x94, 0x39, 0xbe, 0xc7, 0xca,
+	0x99, 0x95, 0xec, 0x8d, 0xe2, 0xcd, 0x97, 0xab, 0x69, 0xd6, 0x5f, 0x15, 0x0a, 0xee, 0x4b, 0xd6,
+	0x2d, 0x9f, 0xd6, 0x1d, 0xd6, 0xf0, 0x4f, 0x09, 0xed, 0xd6, 0xe6, 0x95, 0x96, 0xbc, 0x42, 0x32,
+	0x1c, 0x69, 0x40, 0x3f, 0x31, 0x60, 0xbe, 0x4d, 0xc9, 0x11, 0xa1, 0x94, 0xd8, 0x0a, 0x5f, 0xce,
+	0xae, 0x18, 0x9f, 0x82, 0xda, 0xb2, 0x52, 0x3b, 0xbf, 0xdf, 0x27, 0x1f, 0x0f, 0x68, 0x44, 0xbf,
+	0x33, 0x60, 0x89, 0x11, 0x7a, 0x4a, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58, 0xad, 0xbb, 0xe1, 0x3a,
+	0xc4, 0x0b, 0x36, 0xb6, 0xeb, 0x98, 0x95, 0x27, 0xc5, 0x3e, 0x7c, 0x23, 0x9d, 0x41, 0x07, 0xa3,
+	0xe4, 0xd4, 0x4c, 0x65, 0xd1, 0xd2, 0x48, 0x12, 0x86, 0x1f, 0x61, 0x86, 0x79, 0x04, 0x33, 0xe1,
+	0x41, 0xee, 0x38, 0x2c, 0x40, 0xf7, 0x61, 0xaa, 0xc9, 0x3f, 0x58, 0xd9, 0x10, 0x06, 0x56, 0xd3,
+	0x19, 0x18, 0xca, 0xa8, 0xcd, 0x2a, 0x7b, 0xa6, 0xc4, 0x27, 0xc3, 0x4a, 0x9a, 0xf9, 0x8b, 0x49,
+	0x28, 0xae, 0xef, 0x6f, 0x63, 0xc2, 0xfc, 0x0e, 0x6d, 0x90, 0x14, 0x4e, 0x73, 0x0b, 0x66, 0x98,
+	0xe3, 0x35, 0x3b, 0xae, 0x45, 0x39, 0xb4, 0x3c, 0x25, 0x28, 0x17, 0x15, 0xe5, 0xcc, 0x81, 0x86,
+	0xc3, 0x09, 0x4a, 0x74, 0x13, 0x80, 0x4b, 0x60, 0x6d, 0xab, 0x41, 0xec, 0x72, 0x66, 0xc5, 0xb8,
+	0x91, 0xaf, 0x21, 0xc5, 0x07, 0xaf, 0x45, 0x18, 0xac, 0x51, 0xa1, 0x27, 0x21, 0x27, 0x2c, 0x2d,
+	0xe7, 0x85, 0x9a, 0x92, 0x22, 0xcf, 0x89, 0x65, 0x60, 0x89, 0x43, 0x4f, 0xc1, 0xb4, 0xf2, 0xb2,
+	0x72, 0x41, 0x90, 0xcd, 0x29, 0xb2, 0xe9, 0xd0, 0x0d, 0x42, 0x3c, 0x5f, 0xdf, 0x89, 0xe3, 0xd9,
+	0xc2, 0xef, 0xb4, 0xf5, 0xbd, 0xea, 0x78, 0x36, 0x16, 0x18, 0xb4, 0x03, 0xb9, 0x53, 0x42, 0x0f,
+	0xb9, 0x27, 0x70, 0xd7, 0xfc, 0x4a, 0xba, 0x8d, 0xbe, 0xcf, 0x59, 0x6a, 0x05, 0x6e, 0x9a, 0xf8,
+	0x89, 0xa5, 0x10, 0x54, 0x05, 0x60, 0xc7, 0x3e, 0x0d, 0xc4, 0xf2, 0xca, 0xb9, 0x95, 0xec, 0x8d,
+	0x42, 0x6d, 0x96, 0xaf, 0xf7, 0x20, 0x82, 0x62, 0x8d, 0x82, 0xd3, 0x37, 0xac, 0x80, 0x34, 0x7d,
+	0xea, 0x10, 0x56, 0x9e, 0x8e, 0xe9, 0x37, 0x22, 0x28, 0xd6, 0x28, 0xd0, 0xb7, 0x00, 0xb1, 0xc0,
+	0xa7, 0x56, 0x93, 0xa8, 0xa5, 0xbe, 0x62, 0xb1, 0xe3, 0x32, 0x88, 0xd5, 0x2d, 0xa9, 0xd5, 0xa1,
+	0x83, 0x01, 0x0a, 0x3c, 0x84, 0xcb, 0xfc, 0x83, 0x01, 0x73, 0x9a, 0x2f, 0x08, 0xbf, 0xbb, 0x05,
+	0x33, 0x4d, 0xed, 0xd6, 0x29, 0xbf, 0x88, 0x4e, 0x5b, 0xbf, 0x91, 0x38, 0x41, 0x89, 0x08, 0x14,
+	0xa8, 0x92, 0x14, 0x46, 0x97, 0xb5, 0xd4, 0x4e, 0x1b, 0xda, 0x10, 0x6b, 0xd2, 0x80, 0x0c, 0xc7,
+	0x92, 0xcd, 0x7f, 0x1a, 0xc2, 0x81, 0xc3, 0x78, 0x83, 0x6e, 0x68, 0x31, 0xcd, 0x10, 0xdb, 0x37,
+	0x33, 0x22, 0x1e, 0x5d, 0x12, 0x08, 0x32, 0xff, 0x17, 0x81, 0xe0, 0x76, 0xfe, 0xd7, 0xef, 0x55,
+	0x26, 0xde, 0xfe, 0xfb, 0xca, 0x84, 0xf9, 0x9f, 0x0c, 0x14, 0x36, 0x7c, 0xcf, 0x76, 0x02, 0xe5,
+	0xc8, 0x41, 0xb7, 0x3d, 0x70, 0x51, 0xef, 0x76, 0xdb, 0x04, 0x0b, 0x0c, 0x7a, 0x01, 0xa6, 0x58,
+	0x60, 0x05, 0x1d, 0x26, 0xae, 0x5a, 0xa1, 0xf6, 0x44, 0x18, 0x02, 0x0e, 0x04, 0xf4, 0xa2, 0x57,
+	0x99, 0x8b, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xab, 0xfc, 0x43, 0x61, 0x94, 0x7d, 0x47, 0xa6,
+	0x98, 0x30, 0x56, 0x67, 0x63, 0xaf, 0xda, 0x1b, 0xa0, 0xc0, 0x43, 0xb8, 0xd0, 0x29, 0x20, 0xd7,
+	0x62, 0xc1, 0x5d, 0x6a, 0x79, 0x4c, 0xe8, 0xba, 0xeb, 0xb4, 0x88, 0xba, 0x5c, 0x5f, 0x4e, 0xb7,
+	0xbb, 0x9c, 0x23, 0xd6, 0xbb, 0x33, 0x20, 0x0d, 0x0f, 0xd1, 0x80, 0xae, 0xc3, 0x14, 0x25, 0x16,
+	0xf3, 0xbd, 0x72, 0x4e, 0x2c, 0x3f, 0x8a, 0x80, 0x58, 0x40, 0xb1, 0xc2, 0xf2, 0xe0, 0xd1, 0x22,
+	0x8c, 0x59, 0xcd, 0x30, 0x94, 0x45, 0xc1, 0x63, 0x57, 0x82, 0x71, 0x88, 0x37, 0x5b, 0x50, 0xda,
+	0xa0, 0xc4, 0x0a, 0xc8, 0x5e, 0x3b, 0x10, 0x2e, 0x64, 0xc2, 0x94, 0x4d, 0xbb, 0xb8, 0xe3, 0x29,
+	0x57, 0x03, 0x2e, 0xbf, 0x2e, 0x20, 0x58, 0x61, 0xf8, 0x0d, 0x3a, 0x72, 0x88, 0x6b, 0xef, 0x5a,
+	0x9e, 0xd5, 0x24, 0x54, 0x45, 0x9e, 0xc8, 0xaf, 0xb7, 0x34, 0x1c, 0x4e, 0x50, 0x9a, 0x3f, 0xcb,
+	0x42, 0xa9, 0x4e, 0x5c, 0x12, 0xeb, 0xdb, 0x02, 0xd4, 0xa4, 0x56, 0x83, 0xec, 0x13, 0xea, 0xf8,
+	0xf6, 0x01, 0x69, 0xf8, 0x9e, 0xcd, 0x84, 0x0b, 0x64, 0x6b, 0x9f, 0xe3, 0x7b, 0x73, 0x67, 0x00,
+	0x8b, 0x87, 0x70, 0x20, 0x17, 0x4a, 0x6d, 0x2a, 0x7e, 0x8b, 0xfd, 0x92, 0x1e, 0x52, 0xbc, 0xf9,
+	0xd5, 0x74, 0xc7, 0xb1, 0xaf, 0xb3, 0xd6, 0x16, 0xce, 0x7b, 0x95, 0x52, 0x02, 0x84, 0x93, 0xc2,
+	0xd1, 0x37, 0x61, 0xde, 0xa7, 0xed, 0x63, 0xcb, 0xab, 0x93, 0x36, 0xf1, 0x6c, 0xe2, 0x05, 0x4c,
+	0xec, 0x42, 0xbe, 0xb6, 0xc8, 0x73, 0xf6, 0x5e, 0x1f, 0x0e, 0x0f, 0x50, 0xa3, 0xd7, 0x61, 0xa1,
+	0x4d, 0xfd, 0xb6, 0xd5, 0x14, 0x2e, 0xb5, 0xef, 0xbb, 0x4e, 0xa3, 0x2b, 0x5c, 0xa8, 0x50, 0x7b,
+	0xfa, 0xbc, 0x57, 0x59, 0xd8, 0xef, 0x47, 0x5e, 0xf4, 0x2a, 0x57, 0xc4, 0xd6, 0x71, 0x48, 0x8c,
+	0xc4, 0x83, 0x62, 0xb4, 0x33, 0xcc, 0x8d, 0x3a, 0x43, 0x73, 0x1b, 0xf2, 0xf5, 0x8e, 0xf2, 0xe7,
+	0x97, 0x20, 0x6f, 0xab, 0xdf, 0x6a, 0xe7, 0xc3, 0x8b, 0x15, 0xd1, 0x5c, 0xf4, 0x2a, 0x25, 0x5e,
+	0xa6, 0x55, 0x43, 0x00, 0x8e, 0x58, 0xcc, 0x37, 0xa0, 0xb4, 0xf9, 0xa0, 0xed, 0xd3, 0x20, 0x3c,
+	0xd3, 0xeb, 0x30, 0x45, 0x04, 0x40, 0x48, 0xcb, 0xc7, 0x7e, 0x2a, 0xc9, 0xb0, 0xc2, 0xf2, 0x4c,
+	0x48, 0x1e, 0x58, 0x8d, 0x40, 0x25, 0xce, 0x28, 0x13, 0x6e, 0x72, 0x20, 0x96, 0x38, 0xf3, 0x3a,
+	0xe4, 0x85, 0x43, 0xb1, 0xfb, 0x6b, 0x68, 0x1e, 0xb2, 0xd8, 0x3a, 0x13, 0x52, 0x67, 0x70, 0x96,
+	0x5a, 0x67, 0x5a, 0x2c, 0xd9, 0x03, 0xb8, 0x43, 0x22, 0x13, 0xd6, 0x61, 0x2e, 0x0c, 0xa8, 0xc9,
+	0x38, 0xff, 0x79, 0xa5, 0x64, 0x0e, 0x27, 0xd1, 0xb8, 0x9f, 0xde, 0x7c, 0x03, 0x0a, 0x22, 0x17,
+	0xf0, 0x44, 0x1a, 0x27, 0x6d, 0xe3, 0x11, 0x49, 0x3b, 0xcc, 0xc4, 0x99, 0x51, 0x99, 0x58, 0x33,
+	0xd7, 0x85, 0x92, 0xe4, 0x0d, 0xcb, 0x94, 0x54, 0x1a, 0x9e, 0x86, 0x7c, 0x68, 0xa6, 0xd2, 0x12,
+	0x95, 0xa7, 0xa1, 0x20, 0x1c, 0x51, 0x68, 0xda, 0x8e, 0x21, 0x91, 0xd7, 0xd2, 0x29, 0xd3, 0x6a,
+	0x90, 0xcc, 0xa3, 0x6b, 0x10, 0x4d, 0xd3, 0x8f, 0xa1, 0x3c, 0xaa, 0xa6, 0x7d, 0x8c, 0xcc, 0x9b,
+	0xde, 0x14, 0xf3, 0x1d, 0x03, 0xe6, 0x75, 0x49, 0xe9, 0x8f, 0x2f, 0xbd, 0x92, 0xcb, 0x6b, 0x2e,
+	0x6d, 0x47, 0x7e, 0x6b, 0xc0, 0x62, 0x62, 0x69, 0x63, 0x9d, 0xf8, 0x18, 0x46, 0xe9, 0xce, 0x91,
+	0x1d, 0xc3, 0x39, 0xfe, 0x9a, 0x81, 0xd2, 0x8e, 0x75, 0x48, 0xdc, 0x03, 0xe2, 0x92, 0x46, 0xe0,
+	0x53, 0xf4, 0x23, 0x28, 0xb6, 0xac, 0xa0, 0x71, 0x2c, 0xa0, 0x61, 0x7d, 0x5e, 0x4f, 0x17, 0x4a,
+	0x13, 0x92, 0xaa, 0xbb, 0xb1, 0x98, 0x4d, 0x2f, 0xa0, 0xdd, 0xda, 0x15, 0x65, 0x52, 0x51, 0xc3,
+	0x60, 0x5d, 0x9b, 0x68, 0xaa, 0xc4, 0xf7, 0xe6, 0x83, 0x36, 0x2f, 0x1e, 0xc6, 0xef, 0xe5, 0x12,
+	0x26, 0x60, 0xf2, 0x56, 0xc7, 0xa1, 0xa4, 0x45, 0xbc, 0x20, 0x6e, 0xaa, 0x76, 0xfb, 0xe4, 0xe3,
+	0x01, 0x8d, 0x4b, 0x2f, 0xc3, 0x7c, 0xbf, 0xf1, 0x3c, 0xfe, 0x9c, 0x90, 0xae, 0x3c, 0x2f, 0xcc,
+	0x7f, 0xa2, 0x45, 0xc8, 0x9d, 0x5a, 0x6e, 0x47, 0xdd, 0x46, 0x2c, 0x3f, 0x6e, 0x67, 0x6e, 0x19,
+	0xe6, 0xef, 0x0d, 0x28, 0x8f, 0x32, 0x04, 0x7d, 0x51, 0x13, 0x54, 0x2b, 0x2a, 0xab, 0xb2, 0xaf,
+	0x92, 0xae, 0x94, 0xba, 0x09, 0x79, 0xbf, 0xcd, 0xab, 0x0d, 0x9f, 0xaa, 0x53, 0x7f, 0x2a, 0x3c,
+	0xc9, 0x3d, 0x05, 0xbf, 0xe8, 0x55, 0xae, 0x26, 0xc4, 0x87, 0x08, 0x1c, 0xb1, 0xf2, 0x3c, 0x20,
+	0xec, 0xe1, 0xb9, 0x29, 0xca, 0x03, 0xf7, 0x05, 0x04, 0x2b, 0x8c, 0xf9, 0x27, 0x03, 0x26, 0x45,
+	0x59, 0xfc, 0x06, 0xe4, 0xf9, 0xfe, 0xd9, 0x56, 0x60, 0x09, 0xbb, 0x52, 0x37, 0x64, 0x9c, 0x7b,
+	0x97, 0x04, 0x56, 0xec, 0x6d, 0x21, 0x04, 0x47, 0x12, 0x11, 0x86, 0x9c, 0x13, 0x90, 0x56, 0x78,
+	0x90, 0xcf, 0x8c, 0x14, 0xad, 0xc6, 0x01, 0x55, 0x6c, 0x9d, 0x6d, 0x3e, 0x08, 0x88, 0xc7, 0x0f,
+	0x23, 0xbe, 0x1a, 0xdb, 0x5c, 0x06, 0x96, 0xa2, 0xcc, 0x7f, 0x1b, 0x10, 0xa9, 0xe2, 0xce, 0xcf,
+	0x88, 0x7b, 0xb4, 0xe3, 0x78, 0x27, 0x6a, 0x5b, 0x23, 0x73, 0x0e, 0x14, 0x1c, 0x47, 0x14, 0xc3,
+	0xd2, 0x43, 0x66, 0xbc, 0xf4, 0xc0, 0x15, 0x36, 0x7c, 0x2f, 0x70, 0xbc, 0xce, 0xc0, 0x6d, 0xdb,
+	0x50, 0x70, 0x1c, 0x51, 0xf0, 0x32, 0x87, 0x92, 0x96, 0xe5, 0x78, 0x8e, 0xd7, 0xe4, 0x8b, 0xd8,
+	0xf0, 0x3b, 0x5e, 0x20, 0xf2, 0xbd, 0x2a, 0x73, 0xf0, 0x00, 0x16, 0x0f, 0xe1, 0x30, 0xff, 0x38,
+	0x09, 0x45, 0xbe, 0xe6, 0x30, 0xcf, 0xbd, 0x08, 0x25, 0x57, 0xf7, 0x02, 0xb5, 0xf6, 0xab, 0xca,
+	0x94, 0xe4, 0xbd, 0xc6, 0x49, 0x5a, 0xce, 0x2c, 0xaa, 0xb3, 0x88, 0x39, 0x93, 0x64, 0xde, 0xd2,
+	0x91, 0x38, 0x49, 0xcb, 0xa3, 0xd7, 0x19, 0xbf, 0x1f, 0xaa, 0xee, 0x89, 0x8e, 0xe8, 0xdb, 0x1c,
+	0x88, 0x25, 0x0e, 0xed, 0xc2, 0x15, 0xcb, 0x75, 0xfd, 0x33, 0x01, 0xac, 0xf9, 0xfe, 0x49, 0xcb,
+	0xa2, 0x27, 0x4c, 0xb4, 0xb4, 0xf9, 0xda, 0x17, 0x14, 0xcb, 0x95, 0xf5, 0x41, 0x12, 0x3c, 0x8c,
+	0x6f, 0xd8, 0xb1, 0x4d, 0x8e, 0x79, 0x6c, 0xc7, 0xb0, 0xd8, 0x07, 0x12, 0xb7, 0x5c, 0xf5, 0x97,
+	0xcf, 0x29, 0x39, 0x8b, 0x78, 0x08, 0xcd, 0xc5, 0x08, 0x38, 0x1e, 0x2a, 0x11, 0xdd, 0x86, 0x59,
+	0xee, 0xc9, 0x7e, 0x27, 0x08, 0xab, 0xda, 0x9c, 0x38, 0x6e, 0x74, 0xde, 0xab, 0xcc, 0xde, 0x4d,
+	0x60, 0x70, 0x1f, 0x25, 0xdf, 0x5c, 0xd7, 0x69, 0x39, 0x41, 0x79, 0x5a, 0xb0, 0x44, 0x9b, 0xbb,
+	0xc3, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0xbf, 0xcc, 0x03, 0xcd, 0xdf, 0x64, 0x01, 0xc9, 0x32,
+	0xdc, 0x96, 0xf5, 0x94, 0x0c, 0x69, 0xbc, 0x57, 0x50, 0x65, 0xbc, 0xd1, 0xd7, 0x2b, 0xa8, 0x0a,
+	0x3e, 0xc4, 0xa3, 0x5d, 0x28, 0xc8, 0xd0, 0x12, 0x5f, 0x97, 0x55, 0x45, 0x5c, 0xd8, 0x0b, 0x11,
+	0x17, 0xbd, 0xca, 0x52, 0x42, 0x4d, 0x84, 0x11, 0x7d, 0x5c, 0x2c, 0x01, 0xdd, 0x04, 0xb0, 0xda,
+	0x8e, 0x3e, 0x35, 0x2b, 0xc4, 0xb3, 0x93, 0xb8, 0xff, 0xc5, 0x1a, 0x15, 0x7a, 0x05, 0x26, 0x83,
+	0x4f, 0xd6, 0x6b, 0xe5, 0x45, 0x2b, 0xc9, 0x3b, 0x2b, 0x21, 0x81, 0x6b, 0x17, 0xfe, 0xcc, 0xb8,
+	0x59, 0xaa, 0x4d, 0x8a, 0xb4, 0x6f, 0x45, 0x18, 0xac, 0x51, 0xa1, 0xef, 0x40, 0xfe, 0x48, 0x95,
+	0xa2, 0xe2, 0x60, 0x52, 0x87, 0xc8, 0xb0, 0x80, 0x95, 0x8d, 0x7b, 0xf8, 0x85, 0x23, 0x69, 0xe6,
+	0x5b, 0x50, 0xd8, 0x75, 0x1a, 0xd4, 0x17, 0x6d, 0xde, 0x53, 0x30, 0xcd, 0x12, 0x7d, 0x50, 0x74,
+	0x24, 0xa1, 0xbb, 0x84, 0x78, 0xee, 0x27, 0x9e, 0xe5, 0xf9, 0xb2, 0xdb, 0xc9, 0xc5, 0x7e, 0xf2,
+	0x1a, 0x07, 0x62, 0x89, 0xbb, 0xbd, 0xc8, 0x33, 0xfd, 0xcf, 0xdf, 0xaf, 0x4c, 0xbc, 0xfb, 0x7e,
+	0x65, 0xe2, 0xbd, 0xf7, 0x55, 0xd6, 0xbf, 0x00, 0x80, 0xbd, 0xc3, 0x1f, 0x90, 0x86, 0x8c, 0x9f,
+	0xa9, 0xa6, 0x64, 0xe1, 0x70, 0x56, 0x4c, 0xc9, 0x32, 0x7d, 0xd5, 0x9b, 0x86, 0xc3, 0x09, 0x4a,
+	0xb4, 0x0a, 0x85, 0x68, 0xfe, 0xa5, 0x0e, 0x7a, 0x21, 0x74, 0x9c, 0x68, 0x48, 0x86, 0x63, 0x9a,
+	0x44, 0x30, 0x9f, 0xbc, 0x34, 0x98, 0xd7, 0x20, 0xdb, 0x71, 0x6c, 0xd5, 0x13, 0x3f, 0x1b, 0x26,
+	0xd3, 0x7b, 0xdb, 0xf5, 0x8b, 0x5e, 0xe5, 0x89, 0x51, 0x63, 0xe7, 0xa0, 0xdb, 0x26, 0xac, 0x7a,
+	0x6f, 0xbb, 0x8e, 0x39, 0xf3, 0xb0, 0xc8, 0x32, 0x35, 0x66, 0x64, 0xb9, 0x09, 0xd0, 0x8c, 0x27,
+	0x0b, 0xf2, 0xe2, 0x46, 0x1e, 0xa5, 0x4d, 0x14, 0x34, 0x2a, 0xc4, 0x60, 0xa1, 0xc1, 0xdb, 0x6f,
+	0xd5, 0xe1, 0xb3, 0xc0, 0x6a, 0xc9, 0xb9, 0xe0, 0x78, 0xce, 0x7d, 0x4d, 0xa9, 0x59, 0xd8, 0xe8,
+	0x17, 0x86, 0x07, 0xe5, 0x23, 0x1f, 0x16, 0x6c, 0xd5, 0x48, 0xc6, 0x4a, 0x0b, 0x63, 0x2b, 0xbd,
+	0xca, 0x15, 0xd6, 0xfb, 0x05, 0xe1, 0x41, 0xd9, 0xe8, 0xfb, 0xb0, 0x14, 0x02, 0x07, 0xbb, 0x79,
+	0x11, 0x79, 0xb3, 0xb5, 0xe5, 0xf3, 0x5e, 0x65, 0xa9, 0x3e, 0x92, 0x0a, 0x3f, 0x42, 0x02, 0xb2,
+	0x61, 0xca, 0x95, 0x95, 0x6a, 0x51, 0x54, 0x17, 0x5f, 0x4f, 0xb7, 0x8a, 0xd8, 0xfb, 0xab, 0x7a,
+	0x85, 0x1a, 0x75, 0xab, 0xaa, 0x38, 0x55, 0xb2, 0xd1, 0x03, 0x28, 0x5a, 0x9e, 0xe7, 0x07, 0x96,
+	0x9c, 0x2f, 0xcc, 0x08, 0x55, 0xeb, 0x63, 0xab, 0x5a, 0x8f, 0x65, 0xf4, 0x55, 0xc4, 0x1a, 0x06,
+	0xeb, 0xaa, 0xd0, 0x19, 0xcc, 0xf9, 0x67, 0x1e, 0xa1, 0x98, 0x1c, 0x11, 0x4a, 0xbc, 0x06, 0x61,
+	0xe5, 0x92, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0xfb, 0xb5,
+	0xa0, 0x2a, 0x0f, 0x92, 0x9e, 0xe5, 0x3a, 0x3f, 0x24, 0x94, 0x95, 0x67, 0xe3, 0xd1, 0xed, 0x56,
+	0x04, 0xc5, 0x1a, 0x05, 0xfa, 0x1a, 0x14, 0x1b, 0x6e, 0x87, 0x05, 0x44, 0xce, 0xd1, 0xe7, 0xc4,
+	0x0d, 0x8a, 0xd6, 0xb7, 0x11, 0xa3, 0xb0, 0x4e, 0x87, 0x3a, 0x50, 0x6a, 0xe9, 0x29, 0xa3, 0xbc,
+	0x20, 0x56, 0x77, 0x2b, 0xdd, 0xea, 0x06, 0x93, 0x5a, 0x5c, 0xc1, 0x24, 0x70, 0x38, 0xa9, 0x65,
+	0xe9, 0x05, 0x28, 0x7e, 0xc2, 0xe2, 0x9e, 0x37, 0x07, 0xfd, 0xe7, 0x38, 0x56, 0x73, 0xf0, 0xe7,
+	0x0c, 0xcc, 0x26, 0x77, 0xbf, 0x2f, 0x1d, 0xe6, 0x52, 0xa5, 0xc3, 0xb0, 0x0d, 0x35, 0x46, 0x8e,
+	0xfe, 0xc3, 0xb0, 0x9e, 0x1d, 0x19, 0xd6, 0x55, 0xf4, 0x9c, 0x7c, 0x9c, 0xe8, 0x59, 0x05, 0xe0,
+	0x75, 0x06, 0xf5, 0x5d, 0x97, 0x50, 0x11, 0x38, 0xf3, 0x6a, 0xc4, 0x1f, 0x41, 0xb1, 0x46, 0xc1,
+	0xab, 0xe1, 0x43, 0xd7, 0x6f, 0x9c, 0x88, 0x2d, 0x08, 0x2f, 0xbd, 0x08, 0x99, 0x79, 0x59, 0x0d,
+	0xd7, 0x06, 0xb0, 0x78, 0x08, 0x87, 0xd9, 0x85, 0xab, 0xfb, 0x16, 0x0d, 0x1c, 0xcb, 0x8d, 0x2f,
+	0x98, 0x68, 0x37, 0xde, 0x1c, 0x68, 0x66, 0x9e, 0x1d, 0xf7, 0xa2, 0xc6, 0x9b, 0x1f, 0xc3, 0xe2,
+	0x86, 0xc6, 0xfc, 0x9b, 0x01, 0xd7, 0x86, 0xea, 0xfe, 0x0c, 0x9a, 0xa9, 0x37, 0x93, 0xcd, 0xd4,
+	0x8b, 0x29, 0x67, 0x9c, 0xc3, 0xac, 0x1d, 0xd1, 0x5a, 0x4d, 0x43, 0x6e, 0x9f, 0x17, 0xb1, 0xe6,
+	0xaf, 0x0c, 0x98, 0x11, 0xbf, 0xc6, 0x99, 0x0f, 0x57, 0x20, 0x77, 0xe4, 0x87, 0x23, 0xaa, 0xbc,
+	0x7c, 0x42, 0xda, 0xe2, 0x00, 0x2c, 0xe1, 0x8f, 0x31, 0x40, 0x7e, 0xc7, 0x80, 0xe4, 0x64, 0x16,
+	0xbd, 0x2c, 0xfd, 0xd7, 0x88, 0x46, 0xa7, 0x63, 0xfa, 0xee, 0x4b, 0xa3, 0x5a, 0xc1, 0x2b, 0xa9,
+	0xa6, 0x84, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6f, 0x05, 0xc7, 0x8c, 0x2f, 0xbc, 0xcd, 0x7f,
+	0xa8, 0xbd, 0x11, 0x0b, 0x17, 0x18, 0x2c, 0xe1, 0xe6, 0x2f, 0x0d, 0xb8, 0x36, 0xf2, 0xd5, 0x84,
+	0x87, 0x80, 0x46, 0xf4, 0xa5, 0x56, 0x14, 0x79, 0x61, 0x4c, 0x87, 0x35, 0x2a, 0xde, 0xc3, 0x25,
+	0x9e, 0x5a, 0xfa, 0x7b, 0xb8, 0x84, 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x95, 0x01, 0xf5, 0x74, 0xf2,
+	0x3f, 0xf6, 0xd8, 0xeb, 0x7d, 0x0f, 0x37, 0xb3, 0xc9, 0x87, 0x9b, 0xe8, 0x95, 0x46, 0x7b, 0xb9,
+	0xc8, 0x3e, 0xfa, 0xe5, 0x02, 0x3d, 0x1f, 0x3d, 0x86, 0xc8, 0xd0, 0xb5, 0x9c, 0x7c, 0x0c, 0xb9,
+	0xe8, 0x55, 0x66, 0x94, 0xf0, 0xe4, 0xe3, 0xc8, 0xeb, 0x30, 0x6d, 0x93, 0xc0, 0x72, 0x5c, 0xd9,
+	0x8f, 0xa5, 0x7e, 0x22, 0x90, 0xc2, 0xea, 0x92, 0xb5, 0x56, 0xe4, 0x36, 0xa9, 0x0f, 0x1c, 0x0a,
+	0xe4, 0xd1, 0xb6, 0xe1, 0xdb, 0xb2, 0x9d, 0xc8, 0xc5, 0xd1, 0x76, 0xc3, 0xb7, 0x09, 0x16, 0x18,
+	0xf3, 0x5d, 0x03, 0x8a, 0x52, 0xd2, 0x86, 0xd5, 0x61, 0x04, 0xad, 0x45, 0xab, 0x90, 0xc7, 0x7d,
+	0x4d, 0x7f, 0xf5, 0xba, 0xe8, 0x55, 0x0a, 0x82, 0x4c, 0x74, 0x22, 0x43, 0x5e, 0x77, 0x32, 0x97,
+	0xec, 0xd1, 0x93, 0x90, 0x13, 0xb7, 0x47, 0x6d, 0x66, 0x74, 0xd7, 0xc5, 0x05, 0xc3, 0x12, 0x67,
+	0x7e, 0x9c, 0x81, 0x52, 0x62, 0x71, 0x29, 0x7a, 0x81, 0x68, 0x74, 0x99, 0x49, 0x31, 0x0e, 0x1f,
+	0xfd, 0x30, 0xad, 0x72, 0xcf, 0xd4, 0xe3, 0xe4, 0x9e, 0xef, 0xc2, 0x54, 0x83, 0xef, 0x51, 0xf8,
+	0x3f, 0x87, 0xb5, 0x71, 0x8e, 0x53, 0xec, 0x6e, 0xec, 0x8d, 0xe2, 0x93, 0x61, 0x25, 0x10, 0xdd,
+	0x81, 0x05, 0x4a, 0x02, 0xda, 0x5d, 0x3f, 0x0a, 0x08, 0xd5, 0x9b, 0xf8, 0x5c, 0x5c, 0x71, 0xe3,
+	0x7e, 0x02, 0x3c, 0xc8, 0x63, 0x1e, 0xc2, 0xcc, 0x5d, 0xeb, 0xd0, 0x8d, 0x1e, 0xbd, 0x30, 0x94,
+	0x1c, 0xaf, 0xe1, 0x76, 0x6c, 0x22, 0xa3, 0x71, 0x18, 0xbd, 0xc2, 0x4b, 0xbb, 0xad, 0x23, 0x2f,
+	0x7a, 0x95, 0x2b, 0x09, 0x80, 0x7c, 0xe5, 0xc1, 0x49, 0x11, 0xa6, 0x0b, 0x93, 0x9f, 0x61, 0xf7,
+	0xf8, 0x3d, 0x28, 0xc4, 0xf5, 0xfd, 0xa7, 0xac, 0xd2, 0x7c, 0x13, 0xf2, 0xdc, 0xe3, 0xc3, 0xbe,
+	0xf4, 0x92, 0x12, 0x27, 0x59, 0x38, 0x65, 0xd2, 0x14, 0x4e, 0x66, 0x0b, 0x4a, 0xf7, 0xda, 0xf6,
+	0x63, 0x3e, 0x7b, 0x66, 0x52, 0x67, 0xad, 0x9b, 0x20, 0xff, 0x42, 0xc1, 0x13, 0x84, 0xcc, 0xdc,
+	0x5a, 0x82, 0xd0, 0x13, 0xaf, 0x36, 0x95, 0xff, 0xa9, 0x01, 0x20, 0xc6, 0x5f, 0x9b, 0xa7, 0xc4,
+	0x0b, 0x52, 0x3c, 0x8e, 0xdf, 0x83, 0x29, 0x5f, 0x7a, 0x93, 0x7c, 0xfa, 0x1c, 0x73, 0xc6, 0x1a,
+	0x5d, 0x02, 0xe9, 0x4f, 0x58, 0x09, 0xab, 0xdd, 0xf8, 0xe0, 0xe1, 0xf2, 0xc4, 0x87, 0x0f, 0x97,
+	0x27, 0x3e, 0x7a, 0xb8, 0x3c, 0xf1, 0xf6, 0xf9, 0xb2, 0xf1, 0xc1, 0xf9, 0xb2, 0xf1, 0xe1, 0xf9,
+	0xb2, 0xf1, 0xd1, 0xf9, 0xb2, 0xf1, 0xf1, 0xf9, 0xb2, 0xf1, 0xee, 0x3f, 0x96, 0x27, 0x5e, 0xcf,
+	0x9c, 0xae, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x82, 0x62, 0x88, 0xff, 0xb8, 0x26, 0x00, 0x00,
+}
+
+func (m *APIGroup) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ServerAddressByClientCIDRs) > 0 {
+		for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	{
+		size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Versions) > 0 {
+		for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *APIGroupList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *APIResource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *APIResource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.StorageVersionHash)
+	copy(dAtA[i:], m.StorageVersionHash)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash)))
+	i--
+	dAtA[i] = 0x52
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x4a
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0x42
+	if len(m.Categories) > 0 {
+		for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Categories[iNdEx])
+			copy(dAtA[i:], m.Categories[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i -= len(m.SingularName)
+	copy(dAtA[i:], m.SingularName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName)))
+	i--
+	dAtA[i] = 0x32
+	if len(m.ShortNames) > 0 {
+		for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ShortNames[iNdEx])
+			copy(dAtA[i:], m.ShortNames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.Verbs != nil {
+		{
+			size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.Namespaced {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *APIResourceList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIResourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.APIResources) > 0 {
+		for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.GroupVersion)
+	copy(dAtA[i:], m.GroupVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *APIVersions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ServerAddressByClientCIDRs) > 0 {
+		for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Versions) > 0 {
+		for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Versions[iNdEx])
+			copy(dAtA[i:], m.Versions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Condition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Condition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Condition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.FieldManager)
+	copy(dAtA[i:], m.FieldManager)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.DryRun) > 0 {
+		for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.DryRun[iNdEx])
+			copy(dAtA[i:], m.DryRun[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.DryRun) > 0 {
+		for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.DryRun[iNdEx])
+			copy(dAtA[i:], m.DryRun[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.PropagationPolicy != nil {
+		i -= len(*m.PropagationPolicy)
+		copy(dAtA[i:], *m.PropagationPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.OrphanDependents != nil {
+		i--
+		if *m.OrphanDependents {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Preconditions != nil {
+		{
+			size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.GracePeriodSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Duration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Duration))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ExportOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Exact {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Export {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *FieldsV1) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Raw != nil {
+		i -= len(m.Raw)
+		copy(dAtA[i:], m.Raw)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *GetOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupKind) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupResource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupVersion) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.GroupVersion)
+	copy(dAtA[i:], m.GroupVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LabelSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MatchExpressions) > 0 {
+		for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.MatchLabels) > 0 {
+		keysForMatchLabels := make([]string, 0, len(m.MatchLabels))
+		for k := range m.MatchLabels {
+			keysForMatchLabels = append(keysForMatchLabels, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
+		for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.MatchLabels[string(keysForMatchLabels[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForMatchLabels[iNdEx])
+			copy(dAtA[i:], keysForMatchLabels[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Values[iNdEx])
+			copy(dAtA[i:], m.Values[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Operator)
+	copy(dAtA[i:], m.Operator)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *List) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *List) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ListMeta) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RemainingItemCount != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount))
+		i--
+		dAtA[i] = 0x20
+	}
+	i -= len(m.Continue)
+	copy(dAtA[i:], m.Continue)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.SelfLink)
+	copy(dAtA[i:], m.SelfLink)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ListOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ResourceVersionMatch)
+	copy(dAtA[i:], m.ResourceVersionMatch)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersionMatch)))
+	i--
+	dAtA[i] = 0x52
+	i--
+	if m.AllowWatchBookmarks {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	i -= len(m.Continue)
+	copy(dAtA[i:], m.Continue)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
+	i--
+	dAtA[i] = 0x42
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Limit))
+	i--
+	dAtA[i] = 0x38
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x28
+	}
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0x22
+	i--
+	if m.Watch {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FieldSelector)
+	copy(dAtA[i:], m.FieldSelector)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.LabelSelector)
+	copy(dAtA[i:], m.LabelSelector)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FieldsV1 != nil {
+		{
+			size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.FieldsType)
+	copy(dAtA[i:], m.FieldsType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType)))
+	i--
+	dAtA[i] = 0x32
+	if m.Time != nil {
+		{
+			size, err := m.Time.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Operation)
+	copy(dAtA[i:], m.Operation)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Manager)
+	copy(dAtA[i:], m.Manager)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ObjectMeta) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ManagedFields) > 0 {
+		for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x8a
+		}
+	}
+	i -= len(m.ClusterName)
+	copy(dAtA[i:], m.ClusterName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName)))
+	i--
+	dAtA[i] = 0x7a
+	if len(m.Finalizers) > 0 {
+		for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Finalizers[iNdEx])
+			copy(dAtA[i:], m.Finalizers[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx])))
+			i--
+			dAtA[i] = 0x72
+		}
+	}
+	if len(m.OwnerReferences) > 0 {
+		for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x6a
+		}
+	}
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x62
+		}
+	}
+	if len(m.Labels) > 0 {
+		keysForLabels := make([]string, 0, len(m.Labels))
+		for k := range m.Labels {
+			keysForLabels = append(keysForLabels, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+		for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Labels[string(keysForLabels[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForLabels[iNdEx])
+			copy(dAtA[i:], keysForLabels[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if m.DeletionGracePeriodSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds))
+		i--
+		dAtA[i] = 0x50
+	}
+	if m.DeletionTimestamp != nil {
+		{
+			size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	{
+		size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x42
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Generation))
+	i--
+	dAtA[i] = 0x38
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.SelfLink)
+	copy(dAtA[i:], m.SelfLink)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.GenerateName)
+	copy(dAtA[i:], m.GenerateName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *OwnerReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.BlockOwnerDeletion != nil {
+		i--
+		if *m.BlockOwnerDeletion {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.Controller != nil {
+		i--
+		if *m.Controller {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Patch) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Patch) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *PatchOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.FieldManager)
+	copy(dAtA[i:], m.FieldManager)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Force != nil {
+		i--
+		if *m.Force {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.DryRun) > 0 {
+		for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.DryRun[iNdEx])
+			copy(dAtA[i:], m.DryRun[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Preconditions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ResourceVersion != nil {
+		i -= len(*m.ResourceVersion)
+		copy(dAtA[i:], *m.ResourceVersion)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.UID != nil {
+		i -= len(*m.UID)
+		copy(dAtA[i:], *m.UID)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RootPaths) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Paths) > 0 {
+		for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Paths[iNdEx])
+			copy(dAtA[i:], m.Paths[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ServerAddress)
+	copy(dAtA[i:], m.ServerAddress)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ClientCIDR)
+	copy(dAtA[i:], m.ClientCIDR)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Status) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Status) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Code))
+	i--
+	dAtA[i] = 0x30
+	if m.Details != nil {
+		{
+			size, err := m.Details.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Field)
+	copy(dAtA[i:], m.Field)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusDetails) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x32
+	i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds))
+	i--
+	dAtA[i] = 0x28
+	if len(m.Causes) > 0 {
+		for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TableOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.IncludeObject)
+	copy(dAtA[i:], m.IncludeObject)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Timestamp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *TypeMeta) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *UpdateOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.FieldManager)
+	copy(dAtA[i:], m.FieldManager)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.DryRun) > 0 {
+		for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.DryRun[iNdEx])
+			copy(dAtA[i:], m.DryRun[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m Verbs) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m Verbs) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m[iNdEx])
+			copy(dAtA[i:], m[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchEvent) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Object.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *APIGroup) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Versions) > 0 {
+		for _, e := range m.Versions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.PreferredVersion.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.ServerAddressByClientCIDRs) > 0 {
+		for _, e := range m.ServerAddressByClientCIDRs {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *APIGroupList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Groups) > 0 {
+		for _, e := range m.Groups {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *APIResource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Verbs != nil {
+		l = m.Verbs.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ShortNames) > 0 {
+		for _, s := range m.ShortNames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.SingularName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Categories) > 0 {
+		for _, s := range m.Categories {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StorageVersionHash)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *APIResourceList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.GroupVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.APIResources) > 0 {
+		for _, e := range m.APIResources {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *APIVersions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Versions) > 0 {
+		for _, s := range m.Versions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ServerAddressByClientCIDRs) > 0 {
+		for _, e := range m.ServerAddressByClientCIDRs {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Condition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CreateOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DryRun) > 0 {
+		for _, s := range m.DryRun {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.FieldManager)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DeleteOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.GracePeriodSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds))
+	}
+	if m.Preconditions != nil {
+		l = m.Preconditions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.OrphanDependents != nil {
+		n += 2
+	}
+	if m.PropagationPolicy != nil {
+		l = len(*m.PropagationPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.DryRun) > 0 {
+		for _, s := range m.DryRun {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Duration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Duration))
+	return n
+}
+
+func (m *ExportOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	n += 2
+	return n
+}
+
+func (m *FieldsV1) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Raw != nil {
+		l = len(m.Raw)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *GetOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GroupKind) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GroupResource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GroupVersion) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GroupVersionForDiscovery) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.GroupVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GroupVersionKind) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GroupVersionResource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *LabelSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.MatchLabels) > 0 {
+		for k, v := range m.MatchLabels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.MatchExpressions) > 0 {
+		for _, e := range m.MatchExpressions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *LabelSelectorRequirement) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operator)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Values) > 0 {
+		for _, s := range m.Values {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *List) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListMeta) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SelfLink)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Continue)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.RemainingItemCount != nil {
+		n += 1 + sovGenerated(uint64(*m.RemainingItemCount))
+	}
+	return n
+}
+
+func (m *ListOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.LabelSelector)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FieldSelector)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TimeoutSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+	}
+	n += 1 + sovGenerated(uint64(m.Limit))
+	l = len(m.Continue)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.ResourceVersionMatch)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ManagedFieldsEntry) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Manager)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operation)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Time != nil {
+		l = m.Time.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.FieldsType)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.FieldsV1 != nil {
+		l = m.FieldsV1.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ObjectMeta) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.GenerateName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SelfLink)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Generation))
+	l = m.CreationTimestamp.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.DeletionTimestamp != nil {
+		l = m.DeletionTimestamp.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.DeletionGracePeriodSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.OwnerReferences) > 0 {
+		for _, e := range m.OwnerReferences {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Finalizers) > 0 {
+		for _, s := range m.Finalizers {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.ClusterName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.ManagedFields) > 0 {
+		for _, e := range m.ManagedFields {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *OwnerReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Controller != nil {
+		n += 2
+	}
+	if m.BlockOwnerDeletion != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *PartialObjectMetadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PartialObjectMetadataList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Patch) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *PatchOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DryRun) > 0 {
+		for _, s := range m.DryRun {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Force != nil {
+		n += 2
+	}
+	l = len(m.FieldManager)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Preconditions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.UID != nil {
+		l = len(*m.UID)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ResourceVersion != nil {
+		l = len(*m.ResourceVersion)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *RootPaths) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Paths) > 0 {
+		for _, s := range m.Paths {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ServerAddressByClientCIDR) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ClientCIDR)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ServerAddress)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Status) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Details != nil {
+		l = m.Details.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 1 + sovGenerated(uint64(m.Code))
+	return n
+}
+
+func (m *StatusCause) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Field)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *StatusDetails) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Causes) > 0 {
+		for _, e := range m.Causes {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 1 + sovGenerated(uint64(m.RetryAfterSeconds))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TableOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.IncludeObject)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Timestamp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Seconds))
+	n += 1 + sovGenerated(uint64(m.Nanos))
+	return n
+}
+
+func (m *TypeMeta) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *UpdateOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DryRun) > 0 {
+		for _, s := range m.DryRun {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.FieldManager)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m Verbs) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, s := range m {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *WatchEvent) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Object.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *APIGroup) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForVersions := "[]GroupVersionForDiscovery{"
+	for _, f := range this.Versions {
+		repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVersions += "}"
+	repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{"
+	for _, f := range this.ServerAddressByClientCIDRs {
+		repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForServerAddressByClientCIDRs += "}"
+	s := strings.Join([]string{`&APIGroup{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Versions:` + repeatedStringForVersions + `,`,
+		`PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`,
+		`ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *APIGroupList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForGroups := "[]APIGroup{"
+	for _, f := range this.Groups {
+		repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForGroups += "}"
+	s := strings.Join([]string{`&APIGroupList{`,
+		`Groups:` + repeatedStringForGroups + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *APIResource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&APIResource{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`,
+		`ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`,
+		`SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`,
+		`Categories:` + fmt.Sprintf("%v", this.Categories) + `,`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *APIResourceList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForAPIResources := "[]APIResource{"
+	for _, f := range this.APIResources {
+		repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForAPIResources += "}"
+	s := strings.Join([]string{`&APIResourceList{`,
+		`GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`,
+		`APIResources:` + repeatedStringForAPIResources + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Condition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Condition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateOptions{`,
+		`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+		`FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteOptions{`,
+		`GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`,
+		`Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`,
+		`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
+		`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
+		`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Duration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Duration{`,
+		`Duration:` + fmt.Sprintf("%v", this.Duration) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExportOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExportOptions{`,
+		`Export:` + fmt.Sprintf("%v", this.Export) + `,`,
+		`Exact:` + fmt.Sprintf("%v", this.Exact) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetOptions{`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GroupVersionForDiscovery) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GroupVersionForDiscovery{`,
+		`GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LabelSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{"
+	for _, f := range this.MatchExpressions {
+		repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchExpressions += "}"
+	keysForMatchLabels := make([]string, 0, len(this.MatchLabels))
+	for k := range this.MatchLabels {
+		keysForMatchLabels = append(keysForMatchLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
+	mapStringForMatchLabels := "map[string]string{"
+	for _, k := range keysForMatchLabels {
+		mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k])
+	}
+	mapStringForMatchLabels += "}"
+	s := strings.Join([]string{`&LabelSelector{`,
+		`MatchLabels:` + mapStringForMatchLabels + `,`,
+		`MatchExpressions:` + repeatedStringForMatchExpressions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LabelSelectorRequirement) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LabelSelectorRequirement{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *List) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]RawExtension{"
+	for _, f := range this.Items {
+		repeatedStringForItems += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&List{`,
+		`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListMeta) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListMeta{`,
+		`SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
+		`RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListOptions{`,
+		`LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`,
+		`FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`,
+		`Watch:` + fmt.Sprintf("%v", this.Watch) + `,`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+		`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+		`Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
+		`AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`,
+		`ResourceVersionMatch:` + fmt.Sprintf("%v", this.ResourceVersionMatch) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ManagedFieldsEntry) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ManagedFieldsEntry{`,
+		`Manager:` + fmt.Sprintf("%v", this.Manager) + `,`,
+		`Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`,
+		`FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`,
+		`FieldsV1:` + strings.Replace(fmt.Sprintf("%v", this.FieldsV1), "FieldsV1", "FieldsV1", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ObjectMeta) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForOwnerReferences := "[]OwnerReference{"
+	for _, f := range this.OwnerReferences {
+		repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForOwnerReferences += "}"
+	repeatedStringForManagedFields := "[]ManagedFieldsEntry{"
+	for _, f := range this.ManagedFields {
+		repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForManagedFields += "}"
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
+	s := strings.Join([]string{`&ObjectMeta{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`Generation:` + fmt.Sprintf("%v", this.Generation) + `,`,
+		`CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`,
+		`DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`,
+		`DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`OwnerReferences:` + repeatedStringForOwnerReferences + `,`,
+		`Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`,
+		`ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`,
+		`ManagedFields:` + repeatedStringForManagedFields + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *OwnerReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&OwnerReference{`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`Controller:` + valueToStringGenerated(this.Controller) + `,`,
+		`BlockOwnerDeletion:` + valueToStringGenerated(this.BlockOwnerDeletion) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PartialObjectMetadata) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PartialObjectMetadata{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PartialObjectMetadataList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]PartialObjectMetadata{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PartialObjectMetadataList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Patch) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Patch{`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PatchOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PatchOptions{`,
+		`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+		`Force:` + valueToStringGenerated(this.Force) + `,`,
+		`FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Preconditions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Preconditions{`,
+		`UID:` + valueToStringGenerated(this.UID) + `,`,
+		`ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *RootPaths) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RootPaths{`,
+		`Paths:` + fmt.Sprintf("%v", this.Paths) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServerAddressByClientCIDR) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServerAddressByClientCIDR{`,
+		`ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`,
+		`ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Status) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Status{`,
+		`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`,
+		`Code:` + fmt.Sprintf("%v", this.Code) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StatusCause) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StatusCause{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Field:` + fmt.Sprintf("%v", this.Field) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StatusDetails) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForCauses := "[]StatusCause{"
+	for _, f := range this.Causes {
+		repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForCauses += "}"
+	s := strings.Join([]string{`&StatusDetails{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Causes:` + repeatedStringForCauses + `,`,
+		`RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TableOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TableOptions{`,
+		`IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Timestamp) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Timestamp{`,
+		`Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`,
+		`Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TypeMeta) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TypeMeta{`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateOptions{`,
+		`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+		`FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WatchEvent) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WatchEvent{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *APIGroup) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: APIGroup: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Versions = append(m.Versions, GroupVersionForDiscovery{})
+			if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PreferredVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{})
+			if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *APIGroupList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Groups = append(m.Groups, APIGroup{})
+			if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *APIResource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: APIResource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Namespaced = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Verbs == nil {
+				m.Verbs = Verbs{}
+			}
+			if err := m.Verbs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SingularName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SingularName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StorageVersionHash = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *APIResourceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.GroupVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIResources = append(m.APIResources, APIResource{})
+			if err := m.APIResources[len(m.APIResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *APIVersions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: APIVersions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Versions = append(m.Versions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{})
+			if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Condition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Condition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Condition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+			}
+			m.ObservedGeneration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ObservedGeneration |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldManager = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.GracePeriodSeconds = &v
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Preconditions == nil {
+				m.Preconditions = &Preconditions{}
+			}
+			if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.OrphanDependents = &b
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := DeletionPropagation(dAtA[iNdEx:postIndex])
+			m.PropagationPolicy = &s
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Duration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Duration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType)
+			}
+			m.Duration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Duration |= time.Duration(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExportOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Export = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Exact = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FieldsV1) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
+			if m.Raw == nil {
+				m.Raw = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GroupKind) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupKind: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GroupResource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupResource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GroupVersion) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.GroupVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GroupVersionKind) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GroupVersionResource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LabelSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.MatchLabels == nil {
+				m.MatchLabels = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.MatchLabels[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{})
+			if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *List) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: List: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, runtime.RawExtension{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListMeta) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListMeta: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SelfLink = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Continue = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.RemainingItemCount = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LabelSelector = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldSelector = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Watch = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TimeoutSeconds = &v
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+			}
+			m.Limit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Limit |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Continue = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.AllowWatchBookmarks = bool(v != 0)
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersionMatch", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersionMatch = ResourceVersionMatch(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Manager = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Time == nil {
+				m.Time = &Time{}
+			}
+			if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldsType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FieldsV1 == nil {
+				m.FieldsV1 = &FieldsV1{}
+			}
+			if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ObjectMeta) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.GenerateName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SelfLink = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+			}
+			m.Generation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Generation |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DeletionTimestamp == nil {
+				m.DeletionTimestamp = &Time{}
+			}
+			if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.DeletionGracePeriodSeconds = &v
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Labels[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Annotations == nil {
+				m.Annotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Annotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OwnerReferences = append(m.OwnerReferences, OwnerReference{})
+			if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClusterName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{})
+			if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *OwnerReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Controller = &b
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.BlockOwnerDeletion = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, PartialObjectMetadata{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Patch) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Patch: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PatchOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PatchOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PatchOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Force = &b
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldManager = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Preconditions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Preconditions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			m.UID = &s
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.ResourceVersion = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RootPaths) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RootPaths: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClientCIDR = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServerAddress = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Status) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Status: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = StatusReason(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Details == nil {
+				m.Details = &StatusDetails{}
+			}
+			if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
+			}
+			m.Code = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Code |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = CauseType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Field = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusDetails) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Causes = append(m.Causes, StatusCause{})
+			if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType)
+			}
+			m.RetryAfterSeconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RetryAfterSeconds |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TableOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TableOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Timestamp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
+			}
+			m.Seconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Seconds |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
+			}
+			m.Nanos = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Nanos |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TypeMeta) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldManager = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Verbs) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Verbs: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			*m = append(*m, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
new file mode 100644
index 0000000..b72d43f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -0,0 +1,1103 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.apis.meta.v1;
+
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+message APIGroup {
+  // name is the name of the group.
+  optional string name = 1;
+
+  // versions are the versions supported in this group.
+  repeated GroupVersionForDiscovery versions = 2;
+
+  // preferredVersion is the version preferred by the API server, which
+  // probably is the storage version.
+  // +optional
+  optional GroupVersionForDiscovery preferredVersion = 3;
+
+  // a map of client CIDR to server address that is serving this group.
+  // This is to help clients reach servers in the most network-efficient way possible.
+  // Clients can use the appropriate server address as per the CIDR that they match.
+  // In case of multiple matches, clients should use the longest matching CIDR.
+  // The server returns only those CIDRs that it thinks that the client can match.
+  // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+  // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+  // +optional
+  repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4;
+}
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+message APIGroupList {
+  // groups is a list of APIGroup.
+  repeated APIGroup groups = 1;
+}
+
+// APIResource specifies the name of a resource and whether it is namespaced.
+message APIResource {
+  // name is the plural name of the resource.
+  optional string name = 1;
+
+  // singularName is the singular name of the resource.  This allows clients to handle plural and singular opaquely.
+  // The singularName is more correct for reporting status on a single item and both singular and plural are allowed
+  // from the kubectl CLI interface.
+  optional string singularName = 6;
+
+  // namespaced indicates if a resource is namespaced or not.
+  optional bool namespaced = 2;
+
+  // group is the preferred group of the resource.  Empty implies the group of the containing resource list.
+  // For subresources, this may have a different value, for example: "Scale".
+  optional string group = 8;
+
+  // version is the preferred version of the resource.  Empty implies the version of the containing resource list.
+  // For subresources, this may have a different value, for example: "v1" (while inside a v1beta1 version of the core resource's group).
+  optional string version = 9;
+
+  // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
+  optional string kind = 3;
+
+  // verbs is a list of supported kube verbs (this includes get, list, watch, create,
+  // update, patch, delete, deletecollection, and proxy)
+  optional Verbs verbs = 4;
+
+  // shortNames is a list of suggested short names of the resource.
+  repeated string shortNames = 5;
+
+  // categories is a list of the grouped resources this resource belongs to (e.g. 'all')
+  repeated string categories = 7;
+
+  // The hash value of the storage version, the version this resource is
+  // converted to when written to the data store. Value must be treated
+  // as opaque by clients. Only equality comparison on the value is valid.
+  // This is an alpha feature and may change or be removed in the future.
+  // The field is populated by the apiserver only if the
+  // StorageVersionHash feature gate is enabled.
+  // This field will remain optional even if it graduates.
+  // +optional
+  optional string storageVersionHash = 10;
+}
+
+// APIResourceList is a list of APIResource; it is used to expose the name of the
+// resources supported in a specific group and version, and if the resource
+// is namespaced.
+message APIResourceList {
+  // groupVersion is the group and version this APIResourceList is for.
+  optional string groupVersion = 1;
+
+  // resources contains the name of the resources and if they are namespaced.
+  repeated APIResource resources = 2;
+}
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message APIVersions {
+  // versions are the api versions that are available.
+  repeated string versions = 1;
+
+  // a map of client CIDR to server address that is serving this group.
+  // This is to help clients reach servers in the most network-efficient way possible.
+  // Clients can use the appropriate server address as per the CIDR that they match.
+  // In case of multiple matches, clients should use the longest matching CIDR.
+  // The server returns only those CIDRs that it thinks that the client can match.
+  // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+  // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+  repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2;
+}
+
+// Condition contains details for one aspect of the current state of this API Resource.
+// ---
+// This struct is intended for direct use as an array at the field path .status.conditions.  For example,
+// type FooStatus struct{
+//     // Represents the observations of a foo's current state.
+//     // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+//     // +patchMergeKey=type
+//     // +patchStrategy=merge
+//     // +listType=map
+//     // +listMapKey=type
+//     Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+//
+//     // other fields
+// }
+message Condition {
+  // type of condition in CamelCase or in foo.example.com/CamelCase.
+  // ---
+  // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+  // useful (see .node.status.conditions), the ability to deconflict is important.
+  // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+  // +required
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+  // +kubebuilder:validation:MaxLength=316
+  optional string type = 1;
+
+  // status of the condition, one of True, False, Unknown.
+  // +required
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Enum=True;False;Unknown
+  optional string status = 2;
+
+  // observedGeneration represents the .metadata.generation that the condition was set based upon.
+  // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+  // with respect to the current state of the instance.
+  // +optional
+  // +kubebuilder:validation:Minimum=0
+  optional int64 observedGeneration = 3;
+
+  // lastTransitionTime is the last time the condition transitioned from one status to another.
+  // This should be when the underlying condition changed.  If that is not known, then using the time when the API field changed is acceptable.
+  // +required
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Type=string
+  // +kubebuilder:validation:Format=date-time
+  optional Time lastTransitionTime = 4;
+
+  // reason contains a programmatic identifier indicating the reason for the condition's last transition.
+  // Producers of specific condition types may define expected values and meanings for this field,
+  // and whether the values are considered a guaranteed API.
+  // The value should be a CamelCase string.
+  // This field may not be empty.
+  // +required
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:MaxLength=1024
+  // +kubebuilder:validation:MinLength=1
+  // +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
+  optional string reason = 5;
+
+  // message is a human readable message indicating details about the transition.
+  // This may be an empty string.
+  // +required
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:MaxLength=32768
+  optional string message = 6;
+}
+
+// CreateOptions may be provided when creating an API object.
+message CreateOptions {
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 1;
+
+  // fieldManager is a name associated with the actor or entity
+  // that is making these changes. The value must be less than or
+  // equal to 128 characters long, and only contain printable characters,
+  // as defined by https://golang.org/pkg/unicode/#IsPrint.
+  // +optional
+  optional string fieldManager = 3;
+}
+
+// DeleteOptions may be provided when deleting an API object.
+message DeleteOptions {
+  // The duration in seconds before the object should be deleted. Value must be a non-negative integer.
+  // The value zero indicates delete immediately. If this value is nil, the default grace period for the
+  // specified type will be used.
+  // Defaults to a per-object value if not specified. Zero means delete immediately.
+  // +optional
+  optional int64 gracePeriodSeconds = 1;
+
+  // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+  // returned.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional Preconditions preconditions = 2;
+
+  // Deprecated: please use the PropagationPolicy; this field will be deprecated in 1.7.
+  // Should the dependent objects be orphaned. If true/false, the "orphan"
+  // finalizer will be added to/removed from the object's finalizers list.
+  // Either this field or PropagationPolicy may be set, but not both.
+  // +optional
+  optional bool orphanDependents = 3;
+
+  // Whether and how garbage collection will be performed.
+  // Either this field or OrphanDependents may be set, but not both.
+  // The default policy is decided by the existing finalizer set in the
+  // metadata.finalizers and the resource-specific default policy.
+  // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+  // allow the garbage collector to delete the dependents in the background;
+  // 'Foreground' - a cascading policy that deletes all dependents in the
+  // foreground.
+  // +optional
+  optional string propagationPolicy = 4;
+
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 5;
+}
+
+// Duration is a wrapper around time.Duration which supports correct
+// marshaling to YAML and JSON. In particular, it marshals into strings, which
+// can be used as map keys in json.
+message Duration {
+  optional int64 duration = 1;
+}
+
+// ExportOptions is the query options to the standard REST get call.
+// Deprecated. Planned for removal in 1.18.
+message ExportOptions {
+  // Should this value be exported.  Export strips fields that a user can not specify.
+  // Deprecated. Planned for removal in 1.18.
+  optional bool export = 1;
+
+  // Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
+  // Deprecated. Planned for removal in 1.18.
+  optional bool exact = 2;
+}
+
+// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
+//
+// Each key is either a '.' representing the field itself, and will always map to an empty set,
+// or a string representing a sub-field or item. The string will follow one of these four formats:
+// 'f:<name>', where <name> is the name of a field in a struct, or key in a map
+// 'v:<value>', where <value> is the exact json formatted value of a list item
+// 'i:<index>', where <index> is the position of an item in a list
+// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
+// If a key maps to an empty Fields value, the field that key represents is part of the set.
+//
+// The exact format is defined in sigs.k8s.io/structured-merge-diff
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message FieldsV1 {
+  // Raw is the underlying serialization of this object.
+  optional bytes Raw = 1;
+}
+
+// GetOptions is the standard query options to the standard REST get call.
+message GetOptions {
+  // resourceVersion sets a constraint on what resource versions a request may be served from.
+  // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+  // details.
+  //
+  // Defaults to unset
+  // +optional
+  optional string resourceVersion = 1;
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupKind {
+  optional string group = 1;
+
+  optional string kind = 2;
+}
+
+// GroupResource specifies a Group and a Resource, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupResource {
+  optional string group = 1;
+
+  optional string resource = 2;
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersion {
+  optional string group = 1;
+
+  optional string version = 2;
+}
+
+// GroupVersion contains the "group/version" and "version" string of a version.
+// It is made a struct to keep extensibility.
+message GroupVersionForDiscovery {
+  // groupVersion specifies the API group and version in the form "group/version"
+  optional string groupVersion = 1;
+
+  // version specifies the version in the form of "version". This is to save
+  // the clients the trouble of splitting the GroupVersion.
+  optional string version = 2;
+}
+
+// GroupVersionKind unambiguously identifies a kind.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersionKind {
+  optional string group = 1;
+
+  optional string version = 2;
+
+  optional string kind = 3;
+}
+
+// GroupVersionResource unambiguously identifies a resource.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message GroupVersionResource {
+  optional string group = 1;
+
+  optional string version = 2;
+
+  optional string resource = 3;
+}
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+message LabelSelector {
+  // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+  // map is equivalent to an element of matchExpressions, whose key field is "key", the
+  // operator is "In", and the values array contains only "value". The requirements are ANDed.
+  // +optional
+  map<string, string> matchLabels = 1;
+
+  // matchExpressions is a list of label selector requirements. The requirements are ANDed.
+  // +optional
+  repeated LabelSelectorRequirement matchExpressions = 2;
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+message LabelSelectorRequirement {
+  // key is the label key that the selector applies to.
+  // +patchMergeKey=key
+  // +patchStrategy=merge
+  optional string key = 1;
+
+  // operator represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists and DoesNotExist.
+  optional string operator = 2;
+
+  // values is an array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. This array is replaced during a strategic
+  // merge patch.
+  // +optional
+  repeated string values = 3;
+}
+
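For orientation (editorial example, not vendored code): a short Go sketch of the two requirement styles just described, converted into a labels.Selector with the LabelSelectorAsSelector helper that appears later in this change. The "app", "release", and "tier" keys are placeholders.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// buildSelector combines matchLabels and matchExpressions; all requirements
// are ANDed together, exactly as the comments above state.
func buildSelector() (labels.Selector, error) {
	ls := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "bbsim"}, // shorthand for "app In (bbsim)"
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "release", Operator: metav1.LabelSelectorOpNotIn, Values: []string{"canary"}},
			{Key: "tier", Operator: metav1.LabelSelectorOpExists},
		},
	}
	return metav1.LabelSelectorAsSelector(ls)
}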
+// List holds a list of objects, which may not be known by the server.
+message List {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional ListMeta metadata = 1;
+
+  // List of objects
+  repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+}
+
+// ListMeta describes metadata that synthetic resources must have, including lists and
+// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+message ListMeta {
+  // selfLink is a URL representing this object.
+  // Populated by the system.
+  // Read-only.
+  //
+  // DEPRECATED
+  // Kubernetes will stop propagating this field in 1.20 release and the field is planned
+  // to be removed in 1.21 release.
+  // +optional
+  optional string selfLink = 1;
+
+  // String that identifies the server's internal version of this object that
+  // can be used by clients to determine when objects have changed.
+  // Value must be treated as opaque by clients and passed unmodified back to the server.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+  // +optional
+  optional string resourceVersion = 2;
+
+  // continue may be set if the user set a limit on the number of items returned, and indicates that
+  // the server has more data available. The value is opaque and may be used to issue another request
+  // to the endpoint that served this list to retrieve the next set of available objects. Continuing a
+  // consistent list may not be possible if the server configuration has changed or more than a few
+  // minutes have passed. The resourceVersion field returned when using this continue value will be
+  // identical to the value in the first response, unless you have received this token from an error
+  // message.
+  optional string continue = 3;
+
+  // remainingItemCount is the number of subsequent items in the list which are not included in this
+  // list response. If the list request contained label or field selectors, then the number of
+  // remaining items is unknown and the field will be left unset and omitted during serialization.
+  // If the list is complete (either because it is not chunking or because this is the last chunk),
+  // then there are no more remaining items and this field will be left unset and omitted during
+  // serialization.
+  // Servers older than v1.15 do not set this field.
+  // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
+  // should not rely on the remainingItemCount to be set or to be exact.
+  // +optional
+  optional int64 remainingItemCount = 4;
+}
+
+// ListOptions is the query options to a standard REST list call.
+message ListOptions {
+  // A selector to restrict the list of returned objects by their labels.
+  // Defaults to everything.
+  // +optional
+  optional string labelSelector = 1;
+
+  // A selector to restrict the list of returned objects by their fields.
+  // Defaults to everything.
+  // +optional
+  optional string fieldSelector = 2;
+
+  // Watch for changes to the described resources and return them as a stream of
+  // add, update, and remove notifications. Specify resourceVersion.
+  // +optional
+  optional bool watch = 3;
+
+  // allowWatchBookmarks requests watch events with type "BOOKMARK".
+  // Servers that do not implement bookmarks may ignore this flag and
+  // bookmarks are sent at the server's discretion. Clients should not
+  // assume bookmarks are returned at any specific interval, nor may they
+  // assume the server will send any BOOKMARK event during a session.
+  // If this is not a watch, this field is ignored.
+  // If the feature gate WatchBookmarks is not enabled in apiserver,
+  // this field is ignored.
+  // +optional
+  optional bool allowWatchBookmarks = 9;
+
+  // resourceVersion sets a constraint on what resource versions a request may be served from.
+  // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+  // details.
+  //
+  // Defaults to unset
+  // +optional
+  optional string resourceVersion = 4;
+
+  // resourceVersionMatch determines how resourceVersion is applied to list calls.
+  // It is highly recommended that resourceVersionMatch be set for list calls where
+  // resourceVersion is set
+  // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+  // details.
+  //
+  // Defaults to unset
+  // +optional
+  optional string resourceVersionMatch = 10;
+
+  // Timeout for the list/watch call.
+  // This limits the duration of the call, regardless of any activity or inactivity.
+  // +optional
+  optional int64 timeoutSeconds = 5;
+
+  // limit is a maximum number of responses to return for a list call. If more items exist, the
+  // server will set the `continue` field on the list metadata to a value that can be used with the
+  // same initial query to retrieve the next set of results. Setting a limit may return fewer than
+  // the requested amount of items (up to zero items) in the event all requested objects are
+  // filtered out and clients should only use the presence of the continue field to determine whether
+  // more results are available. Servers may choose not to support the limit argument and will return
+  // all of the available results. If limit is specified and the continue field is empty, clients may
+  // assume that no more results are available. This field is not supported if watch is true.
+  //
+  // The server guarantees that the objects returned when using continue will be identical to issuing
+  // a single list call without a limit - that is, no objects created, modified, or deleted after the
+  // first request is issued will be included in any subsequent continued requests. This is sometimes
+  // referred to as a consistent snapshot, and ensures that a client that is using limit to receive
+  // smaller chunks of a very large result can ensure they see all possible objects. If objects are
+  // updated during a chunked list the version of the object that was present at the time the first list
+  // result was calculated is returned.
+  optional int64 limit = 7;
+
+  // The continue option should be set when retrieving more results from the server. Since this value is
+  // server defined, clients may only use the continue value from a previous query result with identical
+  // query parameters (except for the value of continue) and the server may reject a continue value it
+  // does not recognize. If the specified continue value is no longer valid whether due to expiration
+  // (generally five to fifteen minutes) or a configuration change on the server, the server will
+  // respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+  // consistent list, it must restart their list without the continue field. Otherwise, the client may
+  // send another list request with the token received with the 410 error, the server will respond with
+  // a list starting from the next key, but from the latest snapshot, which is inconsistent from the
+  // previous list results - objects that are created, modified, or deleted after the first list request
+  // will be included in the response, as long as their keys are after the "next key".
+  //
+  // This field is not supported when watch is true. Clients may start a watch from the last
+  // resourceVersion value returned by the server and not miss any modifications.
+  optional string continue = 8;
+}
+
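A hedged Go sketch of the limit/continue flow described above, assuming a client-go clientset with the context-aware List signature (client-go v0.18 or newer); the namespace, selector string, and page size are illustrative.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllConfigMaps pages through a collection in chunks of 50, following the
// opaque continue token from ListMeta until the server reports no more data.
func listAllConfigMaps(ctx context.Context, cs kubernetes.Interface, ns string) ([]corev1.ConfigMap, error) {
	var out []corev1.ConfigMap
	opts := metav1.ListOptions{Limit: 50, LabelSelector: "app=bbsim"}
	for {
		list, err := cs.CoreV1().ConfigMaps(ns).List(ctx, opts)
		if err != nil {
			return nil, err
		}
		out = append(out, list.Items...)
		if list.Continue == "" { // empty token: this was the last chunk
			return out, nil
		}
		opts.Continue = list.Continue // opaque, server-defined token
	}
}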
+// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
+// that the fieldset applies to.
+message ManagedFieldsEntry {
+  // Manager is an identifier of the workflow managing these fields.
+  optional string manager = 1;
+
+  // Operation is the type of operation which led to this ManagedFieldsEntry being created.
+  // The only valid values for this field are 'Apply' and 'Update'.
+  optional string operation = 2;
+
+  // APIVersion defines the version of this resource that this field set
+  // applies to. The format is "group/version" just like the top-level
+  // APIVersion field. It is necessary to track the version of a field
+  // set because it cannot be automatically converted.
+  optional string apiVersion = 3;
+
+  // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'
+  // +optional
+  optional Time time = 4;
+
+  // FieldsType is the discriminator for the different fields format and version.
+  // There is currently only one possible value: "FieldsV1"
+  optional string fieldsType = 6;
+
+  // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
+  // +optional
+  optional FieldsV1 fieldsV1 = 7;
+}
+
+// MicroTime is version of Time with microsecond level precision.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message MicroTime {
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  optional int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive. This field may be limited in precision depending on context.
+  optional int32 nanos = 2;
+}
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+message ObjectMeta {
+  // Name must be unique within a namespace. Is required when creating resources, although
+  // some resources may allow a client to request the generation of an appropriate name
+  // automatically. Name is primarily intended for creation idempotence and configuration
+  // definition.
+  // Cannot be updated.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  // +optional
+  optional string name = 1;
+
+  // GenerateName is an optional prefix, used by the server, to generate a unique
+  // name ONLY IF the Name field has not been provided.
+  // If this field is used, the name returned to the client will be different
+  // than the name passed. This value will also be combined with a unique suffix.
+  // The provided value has the same validation rules as the Name field,
+  // and may be truncated by the length of the suffix required to make the value
+  // unique on the server.
+  //
+  // If this field is specified and the generated name exists, the server will
+  // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+  // ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+  // should retry (optionally after the time indicated in the Retry-After header).
+  //
+  // Applied only if Name is not specified.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
+  // +optional
+  optional string generateName = 2;
+
+  // Namespace defines the space within which each name must be unique. An empty namespace is
+  // equivalent to the "default" namespace, but "default" is the canonical representation.
+  // Not all objects are required to be scoped to a namespace - the value of this field for
+  // those objects will be empty.
+  //
+  // Must be a DNS_LABEL.
+  // Cannot be updated.
+  // More info: http://kubernetes.io/docs/user-guide/namespaces
+  // +optional
+  optional string namespace = 3;
+
+  // SelfLink is a URL representing this object.
+  // Populated by the system.
+  // Read-only.
+  //
+  // DEPRECATED
+  // Kubernetes will stop propagating this field in 1.20 release and the field is planned
+  // to be removed in 1.21 release.
+  // +optional
+  optional string selfLink = 4;
+
+  // UID is the unique in time and space value for this object. It is typically generated by
+  // the server on successful creation of a resource and is not allowed to change on PUT
+  // operations.
+  //
+  // Populated by the system.
+  // Read-only.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+  // +optional
+  optional string uid = 5;
+
+  // An opaque value that represents the internal version of this object that can
+  // be used by clients to determine when objects have changed. May be used for optimistic
+  // concurrency, change detection, and the watch operation on a resource or set of resources.
+  // Clients must treat these values as opaque and pass them unmodified back to the server.
+  // They may only be valid for a particular resource or set of resources.
+  //
+  // Populated by the system.
+  // Read-only.
+  // Value must be treated as opaque by clients.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+  // +optional
+  optional string resourceVersion = 6;
+
+  // A sequence number representing a specific generation of the desired state.
+  // Populated by the system. Read-only.
+  // +optional
+  optional int64 generation = 7;
+
+  // CreationTimestamp is a timestamp representing the server time when this object was
+  // created. It is not guaranteed to be set in happens-before order across separate operations.
+  // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+  //
+  // Populated by the system.
+  // Read-only.
+  // Null for lists.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional Time creationTimestamp = 8;
+
+  // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+  // field is set by the server when a graceful deletion is requested by the user, and is not
+  // directly settable by a client. The resource is expected to be deleted (no longer visible
+  // from resource lists, and not reachable by name) after the time in this field, once the
+  // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+  // Once the deletionTimestamp is set, this value may not be unset or be set further into the
+  // future, although it may be shortened or the resource may be deleted prior to this time.
+  // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+  // by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+  // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+  // remove the pod from the API. In the presence of network partitions, this object may still
+  // exist after this timestamp, until an administrator or automated process can determine the
+  // resource is fully terminated.
+  // If not set, graceful deletion of the object has not been requested.
+  //
+  // Populated by the system when a graceful deletion is requested.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional Time deletionTimestamp = 9;
+
+  // Number of seconds allowed for this object to gracefully terminate before
+  // it will be removed from the system. Only set when deletionTimestamp is also set.
+  // May only be shortened.
+  // Read-only.
+  // +optional
+  optional int64 deletionGracePeriodSeconds = 10;
+
+  // Map of string keys and values that can be used to organize and categorize
+  // (scope and select) objects. May match selectors of replication controllers
+  // and services.
+  // More info: http://kubernetes.io/docs/user-guide/labels
+  // +optional
+  map<string, string> labels = 11;
+
+  // Annotations is an unstructured key value map stored with a resource that may be
+  // set by external tools to store and retrieve arbitrary metadata. They are not
+  // queryable and should be preserved when modifying objects.
+  // More info: http://kubernetes.io/docs/user-guide/annotations
+  // +optional
+  map<string, string> annotations = 12;
+
+  // List of objects depended by this object. If ALL objects in the list have
+  // been deleted, this object will be garbage collected. If this object is managed by a controller,
+  // then an entry in this list will point to this controller, with the controller field set to true.
+  // There cannot be more than one managing controller.
+  // +optional
+  // +patchMergeKey=uid
+  // +patchStrategy=merge
+  repeated OwnerReference ownerReferences = 13;
+
+  // Must be empty before the object is deleted from the registry. Each entry
+  // is an identifier for the responsible component that will remove the entry
+  // from the list. If the deletionTimestamp of the object is non-nil, entries
+  // in this list can only be removed.
+  // Finalizers may be processed and removed in any order.  Order is NOT enforced
+  // because it introduces significant risk of stuck finalizers.
+  // finalizers is a shared field, any actor with permission can reorder it.
+  // If the finalizer list is processed in order, then this can lead to a situation
+  // in which the component responsible for the first finalizer in the list is
+  // waiting for a signal (field value, external system, or other) produced by a
+  // component responsible for a finalizer later in the list, resulting in a deadlock.
+  // Without enforced ordering finalizers are free to order amongst themselves and
+  // are not vulnerable to ordering changes in the list.
+  // +optional
+  // +patchStrategy=merge
+  repeated string finalizers = 14;
+
+  // The name of the cluster which the object belongs to.
+  // This is used to distinguish resources with same name and namespace in different clusters.
+  // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
+  // +optional
+  optional string clusterName = 15;
+
+  // ManagedFields maps workflow-id and version to the set of fields
+  // that are managed by that workflow. This is mostly for internal
+  // housekeeping, and users typically shouldn't need to set or
+  // understand this field. A workflow can be the user's name, a
+  // controller's name, or the name of a specific apply path like
+  // "ci-cd". The set of fields is always in the version that the
+  // workflow used when modifying the object.
+  //
+  // +optional
+  repeated ManagedFieldsEntry managedFields = 17;
+}
+
+// OwnerReference contains enough information to let you identify an owning
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
+message OwnerReference {
+  // API version of the referent.
+  optional string apiVersion = 5;
+
+  // Kind of the referent.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  optional string kind = 1;
+
+  // Name of the referent.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 3;
+
+  // UID of the referent.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+  optional string uid = 4;
+
+  // If true, this reference points to the managing controller.
+  // +optional
+  optional bool controller = 6;
+
+  // If true, AND if the owner has the "foregroundDeletion" finalizer, then
+  // the owner cannot be deleted from the key-value store until this
+  // reference is removed.
+  // Defaults to false.
+  // To set this field, a user needs "delete" permission of the owner,
+  // otherwise 422 (Unprocessable Entity) will be returned.
+  // +optional
+  optional bool blockOwnerDeletion = 7;
+}
+
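To tie ObjectMeta and OwnerReference together (editorial sketch, not vendored code): a dependent object pointing a controller reference at a hypothetical owning Deployment. All names, labels, and the annotation key are made up for illustration.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// withControllerRef builds metadata for a dependent object so that garbage
// collection can cascade from its owner; only one reference may set Controller.
func withControllerRef(ownerUID types.UID) metav1.ObjectMeta {
	isController := true
	block := true
	return metav1.ObjectMeta{
		GenerateName: "bbsim-", // the server appends a unique suffix to form the final name
		Namespace:    "voltha",
		Labels:       map[string]string{"app": "bbsim"},
		Annotations:  map[string]string{"example.org/managed-by": "bbsim-sadis-server"},
		OwnerReferences: []metav1.OwnerReference{{
			APIVersion:         "apps/v1",
			Kind:               "Deployment",
			Name:               "bbsim",
			UID:                ownerUID,
			Controller:         &isController, // marks the managing controller
			BlockOwnerDeletion: &block,        // owner waits for this dependent under foreground deletion
		}},
	}
}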
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadata {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional ObjectMeta metadata = 1;
+}
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadataList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional ListMeta metadata = 1;
+
+  // items contains each of the included items.
+  repeated PartialObjectMetadata items = 2;
+}
+
+// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+message Patch {
+}
+
+// PatchOptions may be provided when patching an API object.
+// PatchOptions is meant to be a superset of UpdateOptions.
+message PatchOptions {
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 1;
+
+  // Force is going to "force" Apply requests. It means user will
+  // re-acquire conflicting fields owned by other people. Force
+  // flag must be unset for non-apply patch requests.
+  // +optional
+  optional bool force = 2;
+
+  // fieldManager is a name associated with the actor or entity
+  // that is making these changes. The value must be less than or
+  // equal to 128 characters long, and only contain printable characters,
+  // as defined by https://golang.org/pkg/unicode/#IsPrint. This
+  // field is required for apply requests
+  // (application/apply-patch) but optional for non-apply patch
+  // types (JsonPatch, MergePatch, StrategicMergePatch).
+  // +optional
+  optional string fieldManager = 3;
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+message Preconditions {
+  // Specifies the target UID.
+  // +optional
+  optional string uid = 1;
+
+  // Specifies the target ResourceVersion
+  // +optional
+  optional string resourceVersion = 2;
+}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+message RootPaths {
+  // paths are the paths available at root.
+  repeated string paths = 1;
+}
+
+// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
+message ServerAddressByClientCIDR {
+  // The CIDR with which clients can match their IP to figure out the server address that they should use.
+  optional string clientCIDR = 1;
+
+  // Address of this server, suitable for a client that matches the above CIDR.
+  // This can be a hostname, hostname:port, IP or IP:port.
+  optional string serverAddress = 2;
+}
+
+// Status is a return value for calls that don't return other objects.
+message Status {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional ListMeta metadata = 1;
+
+  // Status of the operation.
+  // One of: "Success" or "Failure".
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional string status = 2;
+
+  // A human-readable description of the status of this operation.
+  // +optional
+  optional string message = 3;
+
+  // A machine-readable description of why this operation is in the
+  // "Failure" status. If this value is empty there
+  // is no information available. A Reason clarifies an HTTP status
+  // code but does not override it.
+  // +optional
+  optional string reason = 4;
+
+  // Extended data associated with the reason.  Each reason may define its
+  // own extended details. This field is optional and the data returned
+  // is not guaranteed to conform to any schema except that defined by
+  // the reason type.
+  // +optional
+  optional StatusDetails details = 5;
+
+  // Suggested HTTP return code for this status, 0 if not set.
+  // +optional
+  optional int32 code = 6;
+}
+
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+message StatusCause {
+  // A machine-readable description of the cause of the error. If this value is
+  // empty there is no information available.
+  // +optional
+  optional string reason = 1;
+
+  // A human-readable description of the cause of the error.  This field may be
+  // presented as-is to a reader.
+  // +optional
+  optional string message = 2;
+
+  // The field of the resource that has caused this error, as named by its JSON
+  // serialization. May include dot and postfix notation for nested attributes.
+  // Arrays are zero-indexed.  Fields may appear more than once in an array of
+  // causes due to fields having multiple errors.
+  // Optional.
+  //
+  // Examples:
+  //   "name" - the field "name" on the current resource
+  //   "items[0].name" - the field "name" on the first array entry in "items"
+  // +optional
+  optional string field = 3;
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or
+// under-defined.
+message StatusDetails {
+  // The name attribute of the resource associated with the status StatusReason
+  // (when there is a single name which can be described).
+  // +optional
+  optional string name = 1;
+
+  // The group attribute of the resource associated with the status StatusReason.
+  // +optional
+  optional string group = 2;
+
+  // The kind attribute of the resource associated with the status StatusReason.
+  // On some operations may differ from the requested resource Kind.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional string kind = 3;
+
+  // UID of the resource.
+  // (when there is a single resource which can be described).
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+  // +optional
+  optional string uid = 6;
+
+  // The Causes array includes more details associated with the StatusReason
+  // failure. Not all StatusReasons may provide detailed causes.
+  // +optional
+  repeated StatusCause causes = 4;
+
+  // If specified, the time in seconds before the operation should be retried. Some errors may indicate
+  // the client must take an alternate action - for those errors this field may indicate how long to wait
+  // before taking the alternate action.
+  // +optional
+  optional int32 retryAfterSeconds = 5;
+}
+
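A small Go sketch (editorial, not vendored) of how a client might read the Status, StatusDetails, and StatusCause fields off an API error, assuming the APIStatus interface from k8s.io/apimachinery/pkg/api/errors.

package example

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// describeFailure renders the machine-readable parts of a Status carried by an
// API error into a single human-readable string.
func describeFailure(err error) string {
	statusErr, ok := err.(apierrors.APIStatus)
	if !ok {
		return err.Error() // not an API error carrying a Status
	}
	st := statusErr.Status() // metav1.Status
	if st.Status != metav1.StatusFailure {
		return st.Message
	}
	msg := fmt.Sprintf("reason=%s code=%d: %s", st.Reason, st.Code, st.Message)
	if st.Details != nil {
		for _, c := range st.Details.Causes { // per-field causes, e.g. validation errors
			msg = fmt.Sprintf("%s\n  field=%s: %s", msg, c.Field, c.Message)
		}
	}
	return msg
}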
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message TableOptions {
+  // includeObject decides whether to include each object along with its columnar information.
+  // Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+  // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+  // in version v1beta1 of the meta.k8s.io API group.
+  optional string includeObject = 1;
+}
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON.  Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message Time {
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  optional int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive. This field may be limited in precision depending on context.
+  optional int32 nanos = 2;
+}
+
+// Timestamp is a struct that is equivalent to Time, but intended for
+// protobuf marshalling/unmarshalling. It is generated into a serialization
+// that matches Time. Do not use in Go structs.
+message Timestamp {
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  optional int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive. This field may be limited in precision depending on context.
+  optional int32 nanos = 2;
+}
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+//
+// +k8s:deepcopy-gen=false
+message TypeMeta {
+  // Kind is a string value representing the REST resource this object represents.
+  // Servers may infer this from the endpoint the client submits requests to.
+  // Cannot be updated.
+  // In CamelCase.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional string kind = 1;
+
+  // APIVersion defines the versioned schema of this representation of an object.
+  // Servers should convert recognized schemas to the latest internal value, and
+  // may reject unrecognized values.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+  // +optional
+  optional string apiVersion = 2;
+}
+
+// UpdateOptions may be provided when updating an API object.
+// All fields in UpdateOptions should also be present in PatchOptions.
+message UpdateOptions {
+  // When present, indicates that modifications should not be
+  // persisted. An invalid or unrecognized dryRun directive will
+  // result in an error response and no further processing of the
+  // request. Valid values are:
+  // - All: all dry run stages will be processed
+  // +optional
+  repeated string dryRun = 1;
+
+  // fieldManager is a name associated with the actor or entity
+  // that is making these changes. The value must be less than or
+  // equal to 128 characters long, and only contain printable characters,
+  // as defined by https://golang.org/pkg/unicode/#IsPrint.
+  // +optional
+  optional string fieldManager = 2;
+}
+
+// Verbs masks the value so that protobuf can generate a nullable slice type.
+//
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message Verbs {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// Event represents a single event to a watched resource.
+//
+// +protobuf=true
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WatchEvent {
+  optional string type = 1;
+
+  // Object is:
+  //  * If Type is Added or Modified: the new state of the object.
+  //  * If Type is Deleted: the state of the object immediately before deletion.
+  //  * If Type is Error: *Status is recommended; other types may make sense
+  //    depending on context.
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
+}
+
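Before the Go sources, a short editorial sketch of the JSON forms promised by the Duration and Time wrappers above; the concrete values are arbitrary.

package example

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// showTimeMarshaling prints the string-based JSON encodings of Duration and Time.
func showTimeMarshaling() error {
	d := metav1.Duration{Duration: 90 * time.Second}
	db, err := json.Marshal(d)
	if err != nil {
		return err
	}
	fmt.Println(string(db)) // "1m30s" - a Go duration string, not a number

	t := metav1.NewTime(time.Date(2020, time.October, 1, 12, 0, 0, 0, time.UTC))
	tb, err := json.Marshal(t)
	if err != nil {
		return err
	}
	fmt.Println(string(tb)) // "2020-10-01T12:00:00Z" - RFC 3339, second precision
	return nil
}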
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go
new file mode 100644
index 0000000..bd4c6d9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupResource specifies a Group and a Resource, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupResource struct {
+	Group    string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+}
+
+func (gr *GroupResource) String() string {
+	if len(gr.Group) == 0 {
+		return gr.Resource
+	}
+	return gr.Resource + "." + gr.Group
+}
+
+// GroupVersionResource unambiguously identifies a resource.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersionResource struct {
+	Group    string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Version  string `json:"version" protobuf:"bytes,2,opt,name=version"`
+	Resource string `json:"resource" protobuf:"bytes,3,opt,name=resource"`
+}
+
+func (gvr *GroupVersionResource) String() string {
+	return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupKind struct {
+	Group string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Kind  string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+}
+
+func (gk *GroupKind) String() string {
+	if len(gk.Group) == 0 {
+		return gk.Kind
+	}
+	return gk.Kind + "." + gk.Group
+}
+
+// GroupVersionKind unambiguously identifies a kind.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersionKind struct {
+	Group   string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+	Kind    string `json:"kind" protobuf:"bytes,3,opt,name=kind"`
+}
+
+func (gvk GroupVersionKind) String() string {
+	return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type GroupVersion struct {
+	Group   string `json:"group" protobuf:"bytes,1,opt,name=group"`
+	Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+}
+
+// Empty returns true if group and version are empty
+func (gv GroupVersion) Empty() bool {
+	return len(gv.Group) == 0 && len(gv.Version) == 0
+}
+
+// String puts "group" and "version" into a single "group/version" string. For the legacy v1
+// it returns "v1".
+func (gv GroupVersion) String() string {
+	// special case the internal apiVersion for the legacy kube types
+	if gv.Empty() {
+		return ""
+	}
+
+	// special case of "v1" for backward compatibility
+	if len(gv.Group) == 0 && gv.Version == "v1" {
+		return gv.Version
+	}
+	if len(gv.Group) > 0 {
+		return gv.Group + "/" + gv.Version
+	}
+	return gv.Version
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (gv GroupVersion) MarshalJSON() ([]byte, error) {
+	s := gv.String()
+	if strings.Count(s, "/") > 1 {
+		return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s)
+	}
+	return json.Marshal(s)
+}
+
+func (gv *GroupVersion) unmarshal(value []byte) error {
+	var s string
+	if err := json.Unmarshal(value, &s); err != nil {
+		return err
+	}
+	parsed, err := schema.ParseGroupVersion(s)
+	if err != nil {
+		return err
+	}
+	gv.Group, gv.Version = parsed.Group, parsed.Version
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (gv *GroupVersion) UnmarshalJSON(value []byte) error {
+	return gv.unmarshal(value)
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (gv *GroupVersion) UnmarshalText(value []byte) error {
+	return gv.unmarshal(value)
+}
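A quick editorial round trip through the String/MarshalJSON/UnmarshalJSON logic defined in this file; "apps/v1" is only an example group/version.

package example

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// groupVersionRoundTrip marshals a GroupVersion to its "group/version" string
// form and parses it back into the struct.
func groupVersionRoundTrip() error {
	gv := metav1.GroupVersion{Group: "apps", Version: "v1"}
	fmt.Println(gv.String()) // "apps/v1"; the legacy core group prints as just "v1"

	b, err := json.Marshal(gv) // encodes as the single JSON string "apps/v1"
	if err != nil {
		return err
	}

	var parsed metav1.GroupVersion
	if err := json.Unmarshal(b, &parsed); err != nil { // splits back into Group and Version
		return err
	}
	fmt.Printf("group=%s version=%s\n", parsed.Group, parsed.Version)
	return nil
}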
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
new file mode 100644
index 0000000..ad989ad
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -0,0 +1,282 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements
+// labels.Selector
+// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go
+func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
+	if ps == nil {
+		return labels.Nothing(), nil
+	}
+	if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {
+		return labels.Everything(), nil
+	}
+	selector := labels.NewSelector()
+	for k, v := range ps.MatchLabels {
+		r, err := labels.NewRequirement(k, selection.Equals, []string{v})
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+	for _, expr := range ps.MatchExpressions {
+		var op selection.Operator
+		switch expr.Operator {
+		case LabelSelectorOpIn:
+			op = selection.In
+		case LabelSelectorOpNotIn:
+			op = selection.NotIn
+		case LabelSelectorOpExists:
+			op = selection.Exists
+		case LabelSelectorOpDoesNotExist:
+			op = selection.DoesNotExist
+		default:
+			return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator)
+		}
+		r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...))
+		if err != nil {
+			return nil, err
+		}
+		selector = selector.Add(*r)
+	}
+	return selector, nil
+}
+
+// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, i.e. the
+// original structure of a label selector. Operators that cannot be converted into plain
+// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in
+// an error.
+func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) {
+	if ps == nil {
+		return nil, nil
+	}
+	selector := map[string]string{}
+	for k, v := range ps.MatchLabels {
+		selector[k] = v
+	}
+	for _, expr := range ps.MatchExpressions {
+		switch expr.Operator {
+		case LabelSelectorOpIn:
+			if len(expr.Values) != 1 {
+				return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator)
+			}
+			// Should we do anything in case this will override a previous key-value pair?
+			selector[expr.Key] = expr.Values[0]
+		case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist:
+			return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator)
+		default:
+			return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator)
+		}
+	}
+	return selector, nil
+}
+
+// ParseToLabelSelector parses a string representing a selector into a LabelSelector object.
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go
+func ParseToLabelSelector(selector string) (*LabelSelector, error) {
+	reqs, err := labels.ParseToRequirements(selector)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err)
+	}
+
+	labelSelector := &LabelSelector{
+		MatchLabels:      map[string]string{},
+		MatchExpressions: []LabelSelectorRequirement{},
+	}
+	for _, req := range reqs {
+		var op LabelSelectorOperator
+		switch req.Operator() {
+		case selection.Equals, selection.DoubleEquals:
+			vals := req.Values()
+			if vals.Len() != 1 {
+				return nil, fmt.Errorf("equals operator must have exactly one value")
+			}
+			val, ok := vals.PopAny()
+			if !ok {
+				return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved")
+			}
+			labelSelector.MatchLabels[req.Key()] = val
+			continue
+		case selection.In:
+			op = LabelSelectorOpIn
+		case selection.NotIn:
+			op = LabelSelectorOpNotIn
+		case selection.Exists:
+			op = LabelSelectorOpExists
+		case selection.DoesNotExist:
+			op = LabelSelectorOpDoesNotExist
+		case selection.GreaterThan, selection.LessThan:
+			// Adding a separate case for these operators to indicate that this is deliberate
+			return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator())
+		default:
+			return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator())
+		}
+		labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{
+			Key:      req.Key(),
+			Operator: op,
+			Values:   req.Values().List(),
+		})
+	}
+	return labelSelector, nil
+}
+
+// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object.
+func SetAsLabelSelector(ls labels.Set) *LabelSelector {
+	if ls == nil {
+		return nil
+	}
+
+	selector := &LabelSelector{
+		MatchLabels: make(map[string]string),
+	}
+	for label, value := range ls {
+		selector.MatchLabels[label] = value
+	}
+
+	return selector
+}
+
+// FormatLabelSelector converts labelSelector into a plain string
+func FormatLabelSelector(labelSelector *LabelSelector) string {
+	selector, err := LabelSelectorAsSelector(labelSelector)
+	if err != nil {
+		return "<error>"
+	}
+
+	l := selector.String()
+	if len(l) == 0 {
+		l = "<none>"
+	}
+	return l
+}
+
+func ExtractGroupVersions(l *APIGroupList) []string {
+	var groupVersions []string
+	for _, g := range l.Groups {
+		for _, gv := range g.Versions {
+			groupVersions = append(groupVersions, gv.GroupVersion)
+		}
+	}
+	return groupVersions
+}
+
+// HasAnnotation returns true if the passed-in annotation exists
+func HasAnnotation(obj ObjectMeta, ann string) bool {
+	_, found := obj.Annotations[ann]
+	return found
+}
+
+// SetMetaDataAnnotation sets the annotation and value
+func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) {
+	if obj.Annotations == nil {
+		obj.Annotations = make(map[string]string)
+	}
+	obj.Annotations[ann] = value
+}
+
+// SingleObject returns a ListOptions for watching a single object.
+func SingleObject(meta ObjectMeta) ListOptions {
+	return ListOptions{
+		FieldSelector:   fields.OneTermEqualSelector("metadata.name", meta.Name).String(),
+		ResourceVersion: meta.ResourceVersion,
+	}
+}
+
+// NewDeleteOptions returns a DeleteOptions indicating the resource should
+// be deleted within the specified grace period. Use zero to indicate
+// immediate deletion. If you would prefer to use the default grace period,
+// use &metav1.DeleteOptions{} directly.
+func NewDeleteOptions(grace int64) *DeleteOptions {
+	return &DeleteOptions{GracePeriodSeconds: &grace}
+}
+
+// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set.
+func NewPreconditionDeleteOptions(uid string) *DeleteOptions {
+	u := types.UID(uid)
+	p := Preconditions{UID: &u}
+	return &DeleteOptions{Preconditions: &p}
+}
+
+// NewUIDPreconditions returns a Preconditions with UID set.
+func NewUIDPreconditions(uid string) *Preconditions {
+	u := types.UID(uid)
+	return &Preconditions{UID: &u}
+}
+
+// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set.
+func NewRVDeletionPrecondition(rv string) *DeleteOptions {
+	p := Preconditions{ResourceVersion: &rv}
+	return &DeleteOptions{Preconditions: &p}
+}
+
+// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values.
+func HasObjectMetaSystemFieldValues(meta Object) bool {
+	return !meta.GetCreationTimestamp().Time.IsZero() ||
+		len(meta.GetUID()) != 0
+}
+
+// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields
+// for a pre-existing object. This is opt-in for new objects with Status subresource.
+func ResetObjectMetaForStatus(meta, existingMeta Object) {
+	meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp())
+	meta.SetGeneration(existingMeta.GetGeneration())
+	meta.SetSelfLink(existingMeta.GetSelfLink())
+	meta.SetLabels(existingMeta.GetLabels())
+	meta.SetAnnotations(existingMeta.GetAnnotations())
+	meta.SetFinalizers(existingMeta.GetFinalizers())
+	meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
+	// managedFields must be preserved since it's been modified to
+	// track changed fields in the status update.
+	//meta.SetManagedFields(existingMeta.GetManagedFields())
+}
+
+// MarshalJSON implements json.Marshaler
+// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (f FieldsV1) MarshalJSON() ([]byte, error) {
+	if f.Raw == nil {
+		return []byte("null"), nil
+	}
+	return f.Raw, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (f *FieldsV1) UnmarshalJSON(b []byte) error {
+	if f == nil {
+		return errors.New("metav1.Fields: UnmarshalJSON on nil pointer")
+	}
+	if !bytes.Equal(b, []byte("null")) {
+		f.Raw = append(f.Raw[0:0], b...)
+	}
+	return nil
+}
+
+var _ json.Marshaler = FieldsV1{}
+var _ json.Unmarshaler = &FieldsV1{}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go
new file mode 100644
index 0000000..9b45145
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added.
+// It returns the given selector unchanged if labelKey is empty.
+func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}
+
+	// Clone.
+	newSelector := selector.DeepCopy()
+
+	if newSelector.MatchLabels == nil {
+		newSelector.MatchLabels = make(map[string]string)
+	}
+
+	newSelector.MatchLabels[labelKey] = labelValue
+
+	return newSelector
+}
+
+// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels.
+func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}
+	if selector.MatchLabels == nil {
+		selector.MatchLabels = make(map[string]string)
+	}
+	selector.MatchLabels[labelKey] = labelValue
+	return selector
+}
+
+// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels
+func SelectorHasLabel(selector *LabelSelector, labelKey string) bool {
+	return len(selector.MatchLabels[labelKey]) > 0
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
new file mode 100644
index 0000000..2002f91
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// TODO: move this, Object, List, and Type to a different package
+type ObjectMetaAccessor interface {
+	GetObjectMeta() Object
+}
+
+// Object lets you work with object metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field (Name, UID, Namespace on lists) will be a no-op and return
+// a default value.
+type Object interface {
+	GetNamespace() string
+	SetNamespace(namespace string)
+	GetName() string
+	SetName(name string)
+	GetGenerateName() string
+	SetGenerateName(name string)
+	GetUID() types.UID
+	SetUID(uid types.UID)
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetGeneration() int64
+	SetGeneration(generation int64)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+	GetCreationTimestamp() Time
+	SetCreationTimestamp(timestamp Time)
+	GetDeletionTimestamp() *Time
+	SetDeletionTimestamp(timestamp *Time)
+	GetDeletionGracePeriodSeconds() *int64
+	SetDeletionGracePeriodSeconds(*int64)
+	GetLabels() map[string]string
+	SetLabels(labels map[string]string)
+	GetAnnotations() map[string]string
+	SetAnnotations(annotations map[string]string)
+	GetFinalizers() []string
+	SetFinalizers(finalizers []string)
+	GetOwnerReferences() []OwnerReference
+	SetOwnerReferences([]OwnerReference)
+	GetClusterName() string
+	SetClusterName(clusterName string)
+	GetManagedFields() []ManagedFieldsEntry
+	SetManagedFields(managedFields []ManagedFieldsEntry)
+}
+
+// ListMetaAccessor retrieves the list interface from an object
+type ListMetaAccessor interface {
+	GetListMeta() ListInterface
+}
+
+// Common lets you work with core metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type Common interface {
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+}
+
+// ListInterface lets you work with list metadata from any of the versioned or
+// internal API objects. Attempting to set or retrieve a field on an object that does
+// not support that field will be a no-op and return a default value.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type ListInterface interface {
+	GetResourceVersion() string
+	SetResourceVersion(version string)
+	GetSelfLink() string
+	SetSelfLink(selfLink string)
+	GetContinue() string
+	SetContinue(c string)
+	GetRemainingItemCount() *int64
+	SetRemainingItemCount(c *int64)
+}
+
+// Type exposes the type and APIVersion of versioned or internal API objects.
+// TODO: move this, and TypeMeta and ListMeta, to a different package
+type Type interface {
+	GetAPIVersion() string
+	SetAPIVersion(version string)
+	GetKind() string
+	SetKind(kind string)
+}
+
+var _ ListInterface = &ListMeta{}
+
+func (meta *ListMeta) GetResourceVersion() string        { return meta.ResourceVersion }
+func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
+func (meta *ListMeta) GetSelfLink() string               { return meta.SelfLink }
+func (meta *ListMeta) SetSelfLink(selfLink string)       { meta.SelfLink = selfLink }
+func (meta *ListMeta) GetContinue() string               { return meta.Continue }
+func (meta *ListMeta) SetContinue(c string)              { meta.Continue = c }
+func (meta *ListMeta) GetRemainingItemCount() *int64     { return meta.RemainingItemCount }
+func (meta *ListMeta) SetRemainingItemCount(c *int64)    { meta.RemainingItemCount = c }
+
+func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
+
+// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *ListMeta) GetListMeta() ListInterface { return obj }
+
+func (obj *ObjectMeta) GetObjectMeta() Object { return obj }
+
+// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows
+// fast, direct access to metadata fields for API objects.
+func (meta *ObjectMeta) GetNamespace() string                { return meta.Namespace }
+func (meta *ObjectMeta) SetNamespace(namespace string)       { meta.Namespace = namespace }
+func (meta *ObjectMeta) GetName() string                     { return meta.Name }
+func (meta *ObjectMeta) SetName(name string)                 { meta.Name = name }
+func (meta *ObjectMeta) GetGenerateName() string             { return meta.GenerateName }
+func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName }
+func (meta *ObjectMeta) GetUID() types.UID                   { return meta.UID }
+func (meta *ObjectMeta) SetUID(uid types.UID)                { meta.UID = uid }
+func (meta *ObjectMeta) GetResourceVersion() string          { return meta.ResourceVersion }
+func (meta *ObjectMeta) SetResourceVersion(version string)   { meta.ResourceVersion = version }
+func (meta *ObjectMeta) GetGeneration() int64                { return meta.Generation }
+func (meta *ObjectMeta) SetGeneration(generation int64)      { meta.Generation = generation }
+func (meta *ObjectMeta) GetSelfLink() string                 { return meta.SelfLink }
+func (meta *ObjectMeta) SetSelfLink(selfLink string)         { meta.SelfLink = selfLink }
+func (meta *ObjectMeta) GetCreationTimestamp() Time          { return meta.CreationTimestamp }
+func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) {
+	meta.CreationTimestamp = creationTimestamp
+}
+func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp }
+func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) {
+	meta.DeletionTimestamp = deletionTimestamp
+}
+func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 {
+	return meta.DeletionGracePeriodSeconds
+}
+func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
+	meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds
+}
+func (meta *ObjectMeta) GetLabels() map[string]string                 { return meta.Labels }
+func (meta *ObjectMeta) SetLabels(labels map[string]string)           { meta.Labels = labels }
+func (meta *ObjectMeta) GetAnnotations() map[string]string            { return meta.Annotations }
+func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
+func (meta *ObjectMeta) GetFinalizers() []string                      { return meta.Finalizers }
+func (meta *ObjectMeta) SetFinalizers(finalizers []string)            { meta.Finalizers = finalizers }
+func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference         { return meta.OwnerReferences }
+func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) {
+	meta.OwnerReferences = references
+}
+func (meta *ObjectMeta) GetClusterName() string                 { return meta.ClusterName }
+func (meta *ObjectMeta) SetClusterName(clusterName string)      { meta.ClusterName = clusterName }
+func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields }
+func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) {
+	meta.ManagedFields = managedFields
+}
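The accessor methods above are what let generic controller code treat any API object uniformly through the metav1.Object interface. A minimal usage sketch, not part of the vendored file; the namespace, name, and label values are illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	meta := &metav1.ObjectMeta{}
	meta.SetNamespace("voltha")
	meta.SetName("bbsim-sadis-server")
	meta.SetLabels(map[string]string{"app": "bbsim"})

	// Any type embedding ObjectMeta satisfies metav1.Object through these accessors.
	var obj metav1.Object = meta
	fmt.Println(obj.GetNamespace(), obj.GetName(), obj.GetLabels()["app"])
}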
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
new file mode 100644
index 0000000..cdd9a6a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/google/gofuzz"
+)
+
+const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00"
+
+// MicroTime is a version of Time with microsecond-level precision.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type MicroTime struct {
+	time.Time `protobuf:"-"`
+}
+
+// DeepCopyInto writes a deep copy of the MicroTime value into out.  The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *MicroTime) DeepCopyInto(out *MicroTime) {
+	*out = *t
+}
+
+// NewMicroTime returns a wrapped instance of the provided time
+func NewMicroTime(time time.Time) MicroTime {
+	return MicroTime{time}
+}
+
+// DateMicro returns the MicroTime corresponding to the supplied parameters
+// by wrapping time.Date.
+func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime {
+	return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// NowMicro returns the current local time.
+func NowMicro() MicroTime {
+	return MicroTime{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *MicroTime) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *MicroTime) Before(u *MicroTime) bool {
+	if t != nil && u != nil {
+		return t.Time.Before(u.Time)
+	}
+	return false
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *MicroTime) Equal(u *MicroTime) bool {
+	if t == nil && u == nil {
+		return true
+	}
+	if t != nil && u != nil {
+		return t.Time.Equal(u.Time)
+	}
+	return false
+}
+
+// BeforeTime reports whether the time instant t is before the second-level precision u.
+func (t *MicroTime) BeforeTime(u *Time) bool {
+	if t != nil && u != nil {
+		return t.Time.Before(u.Time)
+	}
+	return false
+}
+
+// EqualTime reports whether the time instant t is equal to the second-level precision u.
+func (t *MicroTime) EqualTime(u *Time) bool {
+	if t == nil && u == nil {
+		return true
+	}
+	if t != nil && u != nil {
+		return t.Time.Equal(u.Time)
+	}
+	return false
+}
+
+// UnixMicro returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func UnixMicro(sec int64, nsec int64) MicroTime {
+	return MicroTime{time.Unix(sec, nsec)}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *MicroTime) UnmarshalJSON(b []byte) error {
+	if len(b) == 4 && string(b) == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pt, err := time.Parse(RFC3339Micro, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *MicroTime) UnmarshalQueryParameter(str string) error {
+	if len(str) == 0 {
+		t.Time = time.Time{}
+		return nil
+	}
+	// Tolerate requests from older clients that used JSON serialization to build query params
+	if len(str) == 4 && str == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	pt, err := time.Parse(RFC3339Micro, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t MicroTime) MarshalJSON() ([]byte, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as JSON's "null".
+		return []byte("null"), nil
+	}
+
+	return json.Marshal(t.UTC().Format(RFC3339Micro))
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" }
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t MicroTime) MarshalQueryParameter() (string, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as an empty string
+		return "", nil
+	}
+
+	return t.UTC().Format(RFC3339Micro), nil
+}
+
+// Fuzz satisfies fuzz.Interface.
+func (t *MicroTime) Fuzz(c fuzz.Continue) {
+	if t == nil {
+		return
+	}
+	// Allow for about 1000 years of randomness. Accurate to a tenth of a
+	// microsecond. Leave off nanoseconds because JSON doesn't represent
+	// them, so they can't round-trip properly.
+	t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000))
+}
+
+var _ fuzz.Interface = &MicroTime{}
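MicroTime round-trips through JSON at microsecond precision via RFC3339Micro. A small sketch, not part of the vendored file, showing the marshal/unmarshal pair; the timestamp value is arbitrary:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	t := metav1.NewMicroTime(time.Date(2020, time.October, 1, 12, 30, 45, 123456000, time.UTC))

	// MarshalJSON emits the RFC3339Micro form, e.g. "2020-10-01T12:30:45.123456Z".
	b, _ := json.Marshal(t)
	fmt.Println(string(b))

	var back metav1.MicroTime
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.Equal(&t)) // true: the value is exact at microsecond precision
}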
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go
new file mode 100644
index 0000000..6dd6d89
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+)
+
+// Timestamp is declared in time_proto.go
+
+// ProtoMicroTime returns the Time as a new Timestamp value.
+func (m *MicroTime) ProtoMicroTime() *Timestamp {
+	if m == nil {
+		return &Timestamp{}
+	}
+	return &Timestamp{
+		Seconds: m.Time.Unix(),
+		Nanos:   int32(m.Time.Nanosecond()),
+	}
+}
+
+// Size implements the protobuf marshalling interface.
+func (m *MicroTime) Size() (n int) {
+	if m == nil || m.Time.IsZero() {
+		return 0
+	}
+	return m.ProtoMicroTime().Size()
+}
+
+// Unmarshal implements the protobuf marshalling interface.
+func (m *MicroTime) Unmarshal(data []byte) error {
+	if len(data) == 0 {
+		m.Time = time.Time{}
+		return nil
+	}
+	p := Timestamp{}
+	if err := p.Unmarshal(data); err != nil {
+		return err
+	}
+	m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
+	return nil
+}
+
+// Marshal implements the protobuf marshalling interface.
+func (m *MicroTime) Marshal() (data []byte, err error) {
+	if m == nil || m.Time.IsZero() {
+		return nil, nil
+	}
+	return m.ProtoMicroTime().Marshal()
+}
+
+// MarshalTo implements the protobuf marshalling interface.
+func (m *MicroTime) MarshalTo(data []byte) (int, error) {
+	if m == nil || m.Time.IsZero() {
+		return 0, nil
+	}
+	return m.ProtoMicroTime().MarshalTo(data)
+}
+
+// MarshalToSizedBuffer implements the protobuf marshalling interface.
+func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) {
+	if m == nil || m.Time.IsZero() {
+		return 0, nil
+	}
+	return m.ProtoMicroTime().MarshalToSizedBuffer(data)
+}
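Unlike Time.ProtoTime (see time_proto.go further below), ProtoMicroTime keeps the sub-second component. A sketch, not part of the vendored file, using an arbitrary instant:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	mt := metav1.NewMicroTime(time.Unix(1601555445, 123456000))

	// ProtoMicroTime preserves the nanosecond field for protobuf clients.
	ts := mt.ProtoMicroTime()
	fmt.Println(ts.Seconds, ts.Nanos) // 1601555445 123456000
}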
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
new file mode 100644
index 0000000..c1a0771
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+var (
+	// localSchemeBuilder exists only to satisfy the autogenerated conversion
+	// code; it is not otherwise used.
+	schemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &schemeBuilder
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Unversioned is group version for unversioned API objects
+// TODO: this should be v1 probably
+var Unversioned = schema.GroupVersion{Group: "", Version: "v1"}
+
+// WatchEventKind is name reserved for serializing watch events.
+const WatchEventKind = "WatchEvent"
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// scheme is the registry for the common types that adhere to the meta v1 API spec.
+var scheme = runtime.NewScheme()
+
+// ParameterCodec knows about query parameters used with the meta v1 API spec.
+var ParameterCodec = runtime.NewParameterCodec(scheme)
+
+var optionsTypes = []runtime.Object{
+	&ListOptions{},
+	&ExportOptions{},
+	&GetOptions{},
+	&DeleteOptions{},
+	&CreateOptions{},
+	&UpdateOptions{},
+	&PatchOptions{},
+}
+
+// AddToGroupVersion registers common meta types into schemas.
+func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) {
+	scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{})
+	scheme.AddKnownTypeWithName(
+		schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind),
+		&InternalEvent{},
+	)
+	// Supports legacy code paths, most callers should use metav1.ParameterCodec for now
+	scheme.AddKnownTypes(groupVersion, optionsTypes...)
+	// Register Unversioned types under their own special group
+	scheme.AddUnversionedTypes(Unversioned,
+		&Status{},
+		&APIVersions{},
+		&APIGroupList{},
+		&APIGroup{},
+		&APIResourceList{},
+	)
+
+	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
+	utilruntime.Must(RegisterConversions(scheme))
+	utilruntime.Must(RegisterDefaults(scheme))
+}
+
+// AddMetaToScheme registers base meta types into schemas.
+func AddMetaToScheme(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Table{},
+		&TableOptions{},
+		&PartialObjectMetadata{},
+		&PartialObjectMetadataList{},
+	)
+
+	return nil
+}
+
+func init() {
+	scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...)
+
+	utilruntime.Must(AddMetaToScheme(scheme))
+
+	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
+	utilruntime.Must(RegisterDefaults(scheme))
+}
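AddToGroupVersion is typically called once per group/version when a scheme is assembled, mirroring what client-go does for the built-in groups. A sketch, not part of the vendored file; the group name is a hypothetical placeholder:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "sadis.bbsim.example.org", Version: "v1"}

	// Registers WatchEvent, the *Options types, and the unversioned
	// Status/APIGroup types for this group/version.
	metav1.AddToGroupVersion(scheme, gv)

	fmt.Println(scheme.Recognizes(gv.WithKind("WatchEvent"))) // true
}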
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
new file mode 100644
index 0000000..4a1d89c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"time"
+
+	fuzz "github.com/google/gofuzz"
+)
+
+// Time is a wrapper around time.Time which supports correct
+// marshaling to YAML and JSON.  Wrappers are provided for many
+// of the factory methods that the time package offers.
+//
+// +protobuf.options.marshal=false
+// +protobuf.as=Timestamp
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Time struct {
+	time.Time `protobuf:"-"`
+}
+
+// DeepCopyInto creates a deep-copy of the Time value.  The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *Time) DeepCopyInto(out *Time) {
+	*out = *t
+}
+
+// NewTime returns a wrapped instance of the provided time
+func NewTime(time time.Time) Time {
+	return Time{time}
+}
+
+// Date returns the Time corresponding to the supplied parameters
+// by wrapping time.Date.
+func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time {
+	return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)}
+}
+
+// Now returns the current local time.
+func Now() Time {
+	return Time{time.Now()}
+}
+
+// IsZero returns true if the value is nil or time is zero.
+func (t *Time) IsZero() bool {
+	if t == nil {
+		return true
+	}
+	return t.Time.IsZero()
+}
+
+// Before reports whether the time instant t is before u.
+func (t *Time) Before(u *Time) bool {
+	if t != nil && u != nil {
+		return t.Time.Before(u.Time)
+	}
+	return false
+}
+
+// Equal reports whether the time instant t is equal to u.
+func (t *Time) Equal(u *Time) bool {
+	if t == nil && u == nil {
+		return true
+	}
+	if t != nil && u != nil {
+		return t.Time.Equal(u.Time)
+	}
+	return false
+}
+
+// Unix returns the local time corresponding to the given Unix time
+// by wrapping time.Unix.
+func Unix(sec int64, nsec int64) Time {
+	return Time{time.Unix(sec, nsec)}
+}
+
+// Rfc3339Copy returns a copy of the Time at second-level precision.
+func (t Time) Rfc3339Copy() Time {
+	copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339))
+	return Time{copied}
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	if len(b) == 4 && string(b) == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	var str string
+	err := json.Unmarshal(b, &str)
+	if err != nil {
+		return err
+	}
+
+	pt, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// UnmarshalQueryParameter converts from a URL query parameter value to an object
+func (t *Time) UnmarshalQueryParameter(str string) error {
+	if len(str) == 0 {
+		t.Time = time.Time{}
+		return nil
+	}
+	// Tolerate requests from older clients that used JSON serialization to build query params
+	if len(str) == 4 && str == "null" {
+		t.Time = time.Time{}
+		return nil
+	}
+
+	pt, err := time.Parse(time.RFC3339, str)
+	if err != nil {
+		return err
+	}
+
+	t.Time = pt.Local()
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as JSON's "null".
+		return []byte("null"), nil
+	}
+	buf := make([]byte, 0, len(time.RFC3339)+2)
+	buf = append(buf, '"')
+	// time cannot contain non escapable JSON characters
+	buf = t.UTC().AppendFormat(buf, time.RFC3339)
+	buf = append(buf, '"')
+	return buf, nil
+}
+
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (t Time) ToUnstructured() interface{} {
+	if t.IsZero() {
+		return nil
+	}
+	buf := make([]byte, 0, len(time.RFC3339))
+	buf = t.UTC().AppendFormat(buf, time.RFC3339)
+	return string(buf)
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (_ Time) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (_ Time) OpenAPISchemaFormat() string { return "date-time" }
+
+// MarshalQueryParameter converts to a URL query parameter value
+func (t Time) MarshalQueryParameter() (string, error) {
+	if t.IsZero() {
+		// Encode unset/nil objects as an empty string
+		return "", nil
+	}
+
+	return t.UTC().Format(time.RFC3339), nil
+}
+
+// Fuzz satisfies fuzz.Interface.
+func (t *Time) Fuzz(c fuzz.Continue) {
+	if t == nil {
+		return
+	}
+	// Allow for about 1000 years of randomness.  Leave off nanoseconds
+	// because JSON doesn't represent them so they can't round-trip
+	// properly.
+	t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0)
+}
+
+var _ fuzz.Interface = &Time{}
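The main behavioural difference from a bare time.Time is that a zero metav1.Time marshals to JSON null rather than "0001-01-01T00:00:00Z". A sketch, not part of the vendored file; the struct and field name are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type status struct {
	CompletedAt metav1.Time `json:"completedAt"`
}

func main() {
	empty, _ := json.Marshal(status{})
	fmt.Println(string(empty)) // {"completedAt":null}

	filled, _ := json.Marshal(status{CompletedAt: metav1.Now()})
	fmt.Println(string(filled)) // e.g. {"completedAt":"2020-10-01T12:30:45Z"}
}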
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go
new file mode 100644
index 0000000..eac8d96
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+)
+
+// Timestamp is a struct that is equivalent to Time, but intended for
+// protobuf marshalling/unmarshalling. It is generated into a serialization
+// that matches Time. Do not use in Go structs.
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive. This field may be limited in precision depending on context.
+	Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"`
+}
+
+// ProtoTime returns the Time as a new Timestamp value.
+func (m *Time) ProtoTime() *Timestamp {
+	if m == nil {
+		return &Timestamp{}
+	}
+	return &Timestamp{
+		Seconds: m.Time.Unix(),
+		// leaving this here for the record.  our JSON only handled seconds, so this results in writes by
+		// protobuf clients storing values that aren't read by json clients, which results in unexpected
+		// field mutation, which fails various validation and equality code.
+		// Nanos:   int32(m.Time.Nanosecond()),
+	}
+}
+
+// Size implements the protobuf marshalling interface.
+func (m *Time) Size() (n int) {
+	if m == nil || m.Time.IsZero() {
+		return 0
+	}
+	return m.ProtoTime().Size()
+}
+
+// Unmarshal implements the protobuf marshalling interface.
+func (m *Time) Unmarshal(data []byte) error {
+	if len(data) == 0 {
+		m.Time = time.Time{}
+		return nil
+	}
+	p := Timestamp{}
+	if err := p.Unmarshal(data); err != nil {
+		return err
+	}
+	// leaving this here for the record.  our JSON only handled seconds, so this results in writes by
+	// protobuf clients storing values that aren't read by json clients, which results in unexpected
+	// field mutation, which fails various validation and equality code.
+	// m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
+	m.Time = time.Unix(p.Seconds, int64(0)).Local()
+	return nil
+}
+
+// Marshal implements the protobuf marshaling interface.
+func (m *Time) Marshal() (data []byte, err error) {
+	if m == nil || m.Time.IsZero() {
+		return nil, nil
+	}
+	return m.ProtoTime().Marshal()
+}
+
+// MarshalTo implements the protobuf marshaling interface.
+func (m *Time) MarshalTo(data []byte) (int, error) {
+	if m == nil || m.Time.IsZero() {
+		return 0, nil
+	}
+	return m.ProtoTime().MarshalTo(data)
+}
+
+// MarshalToSizedBuffer implements the protobuf reverse marshaling interface.
+func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) {
+	if m == nil || m.Time.IsZero() {
+		return 0, nil
+	}
+	return m.ProtoTime().MarshalToSizedBuffer(data)
+}
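As the comments above note, ProtoTime deliberately drops nanoseconds so protobuf and JSON clients agree on a second-precision value. A sketch, not part of the vendored file, using an arbitrary instant:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	t := metav1.NewTime(time.Unix(1601555445, 123456789))

	// The nanosecond component is intentionally not copied into the Timestamp.
	ts := t.ProtoTime()
	fmt.Println(ts.Seconds, ts.Nanos) // 1601555445 0
}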
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
new file mode 100644
index 0000000..bb57f2c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -0,0 +1,1413 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API types that are common to all versions.
+//
+// The package contains two categories of types:
+// - external (serialized) types that lack their own version (e.g. TypeMeta)
+// - internal (never-serialized) types that are needed by several different
+//   api groups, and so live here, to avoid duplication and/or import loops
+//   (e.g. LabelSelector).
+// In the future, we will probably move these categories of objects into
+// separate packages.
+package v1
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
+//
+// +k8s:deepcopy-gen=false
+type TypeMeta struct {
+	// Kind is a string value representing the REST resource this object represents.
+	// Servers may infer this from the endpoint the client submits requests to.
+	// Cannot be updated.
+	// In CamelCase.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+
+	// APIVersion defines the versioned schema of this representation of an object.
+	// Servers should convert recognized schemas to the latest internal value, and
+	// may reject unrecognized values.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+}
+
+// ListMeta describes metadata that synthetic resources must have, including lists and
+// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
+type ListMeta struct {
+	// selfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	//
+	// DEPRECATED
+	// Kubernetes will stop propagating this field in 1.20 release and the field is planned
+	// to be removed in 1.21 release.
+	// +optional
+	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"`
+
+	// String that identifies the server's internal version of this object that
+	// can be used by clients to determine when objects have changed.
+	// Value must be treated as opaque by clients and passed unmodified back to the server.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
+
+	// continue may be set if the user set a limit on the number of items returned, and indicates that
+	// the server has more data available. The value is opaque and may be used to issue another request
+	// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
+	// consistent list may not be possible if the server configuration has changed or more than a few
+	// minutes have passed. The resourceVersion field returned when using this continue value will be
+	// identical to the value in the first response, unless you have received this token from an error
+	// message.
+	Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
+
+	// remainingItemCount is the number of subsequent items in the list which are not included in this
+	// list response. If the list request contained label or field selectors, then the number of
+	// remaining items is unknown and the field will be left unset and omitted during serialization.
+	// If the list is complete (either because it is not chunking or because this is the last chunk),
+	// then there are no more remaining items and this field will be left unset and omitted during
+	// serialization.
+	// Servers older than v1.15 do not set this field.
+	// The intended use of the remainingItemCount is *estimating* the size of a collection. Clients
+	// should not rely on the remainingItemCount to be set or to be exact.
+	// +optional
+	RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"`
+}
+
+// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here
+const (
+	FinalizerOrphanDependents string = "orphan"
+	FinalizerDeleteDependents string = "foregroundDeletion"
+)
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
+type ObjectMeta struct {
+	// Name must be unique within a namespace. Is required when creating resources, although
+	// some resources may allow a client to request the generation of an appropriate name
+	// automatically. Name is primarily intended for creation idempotence and configuration
+	// definition.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// GenerateName is an optional prefix, used by the server, to generate a unique
+	// name ONLY IF the Name field has not been provided.
+	// If this field is used, the name returned to the client will be different
+	// than the name passed. This value will also be combined with a unique suffix.
+	// The provided value has the same validation rules as the Name field,
+	// and may be truncated by the length of the suffix required to make the value
+	// unique on the server.
+	//
+	// If this field is specified and the generated name exists, the server will
+	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+	// should retry (optionally after the time indicated in the Retry-After header).
+	//
+	// Applied only if Name is not specified.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
+	// +optional
+	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
+
+	// Namespace defines the space within which each name must be unique. An empty namespace is
+	// equivalent to the "default" namespace, but "default" is the canonical representation.
+	// Not all objects are required to be scoped to a namespace - the value of this field for
+	// those objects will be empty.
+	//
+	// Must be a DNS_LABEL.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/namespaces
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+
+	// SelfLink is a URL representing this object.
+	// Populated by the system.
+	// Read-only.
+	//
+	// DEPRECATED
+	// Kubernetes will stop propagating this field in 1.20 release and the field is planned
+	// to be removed in 1.21 release.
+	// +optional
+	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
+
+	// UID is the unique in time and space value for this object. It is typically generated by
+	// the server on successful creation of a resource and is not allowed to change on PUT
+	// operations.
+	//
+	// Populated by the system.
+	// Read-only.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"`
+
+	// An opaque value that represents the internal version of this object that can
+	// be used by clients to determine when objects have changed. May be used for optimistic
+	// concurrency, change detection, and the watch operation on a resource or set of resources.
+	// Clients must treat these values as opaque and passed unmodified back to the server.
+	// They may only be valid for a particular resource or set of resources.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Value must be treated as opaque by clients.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+	// A sequence number representing a specific generation of the desired state.
+	// Populated by the system. Read-only.
+	// +optional
+	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
+
+	// CreationTimestamp is a timestamp representing the server time when this object was
+	// created. It is not guaranteed to be set in happens-before order across separate operations.
+	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+	//
+	// Populated by the system.
+	// Read-only.
+	// Null for lists.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
+
+	// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+	// field is set by the server when a graceful deletion is requested by the user, and is not
+	// directly settable by a client. The resource is expected to be deleted (no longer visible
+	// from resource lists, and not reachable by name) after the time in this field, once the
+	// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+	// Once the deletionTimestamp is set, this value may not be unset or be set further into the
+	// future, although it may be shortened or the resource may be deleted prior to this time.
+	// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+	// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+	// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+	// remove the pod from the API. In the presence of network partitions, this object may still
+	// exist after this timestamp, until an administrator or automated process can determine the
+	// resource is fully terminated.
+	// If not set, graceful deletion of the object has not been requested.
+	//
+	// Populated by the system when a graceful deletion is requested.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
+
+	// Number of seconds allowed for this object to gracefully terminate before
+	// it will be removed from the system. Only set when deletionTimestamp is also set.
+	// May only be shortened.
+	// Read-only.
+	// +optional
+	DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
+
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) objects. May match selectors of replication controllers
+	// and services.
+	// More info: http://kubernetes.io/docs/user-guide/labels
+	// +optional
+	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
+
+	// Annotations is an unstructured key value map stored with a resource that may be
+	// set by external tools to store and retrieve arbitrary metadata. They are not
+	// queryable and should be preserved when modifying objects.
+	// More info: http://kubernetes.io/docs/user-guide/annotations
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
+
+	// List of objects depended by this object. If ALL objects in the list have
+	// been deleted, this object will be garbage collected. If this object is managed by a controller,
+	// then an entry in this list will point to this controller, with the controller field set to true.
+	// There cannot be more than one managing controller.
+	// +optional
+	// +patchMergeKey=uid
+	// +patchStrategy=merge
+	OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
+
+	// Must be empty before the object is deleted from the registry. Each entry
+	// is an identifier for the responsible component that will remove the entry
+	// from the list. If the deletionTimestamp of the object is non-nil, entries
+	// in this list can only be removed.
+	// Finalizers may be processed and removed in any order.  Order is NOT enforced
+	// because it introduces significant risk of stuck finalizers.
+	// finalizers is a shared field, any actor with permission can reorder it.
+	// If the finalizer list is processed in order, then this can lead to a situation
+	// in which the component responsible for the first finalizer in the list is
+	// waiting for a signal (field value, external system, or other) produced by a
+	// component responsible for a finalizer later in the list, resulting in a deadlock.
+	// Without enforced ordering finalizers are free to order amongst themselves and
+	// are not vulnerable to ordering changes in the list.
+	// +optional
+	// +patchStrategy=merge
+	Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
+
+	// The name of the cluster which the object belongs to.
+	// This is used to distinguish resources with the same name and namespace in different clusters.
+	// This field is not set anywhere right now and the apiserver will ignore it if set in a create or update request.
+	// +optional
+	ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
+
+	// ManagedFields maps workflow-id and version to the set of fields
+	// that are managed by that workflow. This is mostly for internal
+	// housekeeping, and users typically shouldn't need to set or
+	// understand this field. A workflow can be the user's name, a
+	// controller's name, or the name of a specific apply path like
+	// "ci-cd". The set of fields is always in the version that the
+	// workflow used when modifying the object.
+	//
+	// +optional
+	ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"`
+}
+
+const (
+	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+	NamespaceDefault string = "default"
+	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+	NamespaceAll string = ""
+	// NamespaceNone is the argument for a context when there is no namespace.
+	NamespaceNone string = ""
+	// NamespaceSystem is the system namespace where we place system components.
+	NamespaceSystem string = "kube-system"
+	// NamespacePublic is the namespace where we place public info (ConfigMaps)
+	NamespacePublic string = "kube-public"
+)
+
+// OwnerReference contains enough information to let you identify an owning
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
+type OwnerReference struct {
+	// API version of the referent.
+	APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
+	// Kind of the referent.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+	// Name of the referent.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// If true, this reference points to the managing controller.
+	// +optional
+	Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"`
+	// If true, AND if the owner has the "foregroundDeletion" finalizer, then
+	// the owner cannot be deleted from the key-value store until this
+	// reference is removed.
+	// Defaults to false.
+	// To set this field, a user needs "delete" permission of the owner,
+	// otherwise 422 (Unprocessable Entity) will be returned.
+	// +optional
+	BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"`
+}
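OwnerReference is how garbage collection ties a dependent to its owner, and the UID must come from the owning object. A sketch, not part of the vendored file; the names and the UID string are illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// owner stands in for the metadata of an existing Deployment.
	owner := metav1.ObjectMeta{Name: "bbsim", UID: types.UID("d9607e19-f88f-11e6-a518-42010a800195")}
	child := metav1.ObjectMeta{Name: "bbsim-sadis-server"}

	ctrl, block := true, true
	child.SetOwnerReferences([]metav1.OwnerReference{{
		APIVersion:         "apps/v1",
		Kind:               "Deployment",
		Name:               owner.GetName(),
		UID:                owner.GetUID(),
		Controller:         &ctrl,
		BlockOwnerDeletion: &block,
	}})
	fmt.Println(len(child.GetOwnerReferences())) // 1
}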
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ListOptions is the query options to a standard REST list call.
+type ListOptions struct {
+	TypeMeta `json:",inline"`
+
+	// A selector to restrict the list of returned objects by their labels.
+	// Defaults to everything.
+	// +optional
+	LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+	// A selector to restrict the list of returned objects by their fields.
+	// Defaults to everything.
+	// +optional
+	FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
+
+	// +k8s:deprecated=includeUninitialized,protobuf=6
+
+	// Watch for changes to the described resources and return them as a stream of
+	// add, update, and remove notifications. Specify resourceVersion.
+	// +optional
+	Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
+	// allowWatchBookmarks requests watch events with type "BOOKMARK".
+	// Servers that do not implement bookmarks may ignore this flag and
+	// bookmarks are sent at the server's discretion. Clients should not
+	// assume bookmarks are returned at any specific interval, nor may they
+	// assume the server will send any BOOKMARK event during a session.
+	// If this is not a watch, this field is ignored.
+	// If the feature gate WatchBookmarks is not enabled in apiserver,
+	// this field is ignored.
+	// +optional
+	AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"`
+
+	// resourceVersion sets a constraint on what resource versions a request may be served from.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+	// details.
+	//
+	// Defaults to unset
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+
+	// resourceVersionMatch determines how resourceVersion is applied to list calls.
+	// It is highly recommended that resourceVersionMatch be set for list calls where
+	// resourceVersion is set.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+	// details.
+	//
+	// Defaults to unset
+	// +optional
+	ResourceVersionMatch ResourceVersionMatch `json:"resourceVersionMatch,omitempty" protobuf:"bytes,10,opt,name=resourceVersionMatch,casttype=ResourceVersionMatch"`
+	// Timeout for the list/watch call.
+	// This limits the duration of the call, regardless of any activity or inactivity.
+	// +optional
+	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
+
+	// limit is a maximum number of responses to return for a list call. If more items exist, the
+	// server will set the `continue` field on the list metadata to a value that can be used with the
+	// same initial query to retrieve the next set of results. Setting a limit may return fewer than
+	// the requested amount of items (up to zero items) in the event all requested objects are
+	// filtered out and clients should only use the presence of the continue field to determine whether
+	// more results are available. Servers may choose not to support the limit argument and will return
+	// all of the available results. If limit is specified and the continue field is empty, clients may
+	// assume that no more results are available. This field is not supported if watch is true.
+	//
+	// The server guarantees that the objects returned when using continue will be identical to issuing
+	// a single list call without a limit - that is, no objects created, modified, or deleted after the
+	// first request is issued will be included in any subsequent continued requests. This is sometimes
+	// referred to as a consistent snapshot, and ensures that a client that is using limit to receive
+	// smaller chunks of a very large result can ensure they see all possible objects. If objects are
+	// updated during a chunked list the version of the object that was present at the time the first list
+	// result was calculated is returned.
+	Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"`
+	// The continue option should be set when retrieving more results from the server. Since this value is
+	// server defined, clients may only use the continue value from a previous query result with identical
+	// query parameters (except for the value of continue) and the server may reject a continue value it
+	// does not recognize. If the specified continue value is no longer valid whether due to expiration
+	// (generally five to fifteen minutes) or a configuration change on the server, the server will
+	// respond with a 410 ResourceExpired error together with a continue token. If the client needs a
+	// consistent list, it must restart their list without the continue field. Otherwise, the client may
+	// send another list request with the token received with the 410 error, the server will respond with
+	// a list starting from the next key, but from the latest snapshot, which is inconsistent from the
+	// previous list results - objects that are created, modified, or deleted after the first list request
+	// will be included in the response, as long as their keys are after the "next key".
+	//
+	// This field is not supported when watch is true. Clients may start a watch from the last
+	// resourceVersion value returned by the server and not miss any modifications.
+	Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
+}
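Limit and Continue together implement chunked (paginated) list calls. A sketch, not part of the vendored file; the label selector and page size are illustrative, and the commented line assumes a list result from a client-go call:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// First page: restrict by label and cap the page size.
	opts := metav1.ListOptions{LabelSelector: "app=bbsim", Limit: 50}

	// For subsequent pages, the continue token from the previous response is reused:
	// opts.Continue = previousList.GetContinue()

	fmt.Printf("%+v\n", opts)
}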
+
+// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
+// may only be set if resourceVersion is also set.
+//
+// "NotOlderThan" matches data at least as new as the provided resourceVersion.
+// "Exact" matches data at the exact resourceVersion provided.
+//
+// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+// details.
+type ResourceVersionMatch string
+
+const (
+	// ResourceVersionMatchNotOlderThan matches data at least as new as the provided
+	// resourceVersion.
+	ResourceVersionMatchNotOlderThan ResourceVersionMatch = "NotOlderThan"
+	// ResourceVersionMatchExact matches data at the exact resourceVersion
+	// provided.
+	ResourceVersionMatchExact ResourceVersionMatch = "Exact"
+)
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExportOptions is the query options to the standard REST get call.
+// Deprecated. Planned for removal in 1.18.
+type ExportOptions struct {
+	TypeMeta `json:",inline"`
+	// Should this value be exported.  Export strips fields that a user cannot specify.
+	// Deprecated. Planned for removal in 1.18.
+	Export bool `json:"export" protobuf:"varint,1,opt,name=export"`
+	// Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'.
+	// Deprecated. Planned for removal in 1.18.
+	Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GetOptions is the standard query options to the standard REST get call.
+type GetOptions struct {
+	TypeMeta `json:",inline"`
+	// resourceVersion sets a constraint on what resource versions a request may be served from.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+	// details.
+	//
+	// Defaults to unset
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"`
+	// +k8s:deprecated=includeUninitialized,protobuf=2
+}
+
+// DeletionPropagation decides if a deletion will propagate to the dependents of
+// the object, and how the garbage collector will handle the propagation.
+type DeletionPropagation string
+
+const (
+	// Orphans the dependents.
+	DeletePropagationOrphan DeletionPropagation = "Orphan"
+	// Deletes the object from the key-value store, the garbage collector will
+	// delete the dependents in the background.
+	DeletePropagationBackground DeletionPropagation = "Background"
+	// The object exists in the key-value store until the garbage collector
+	// deletes all the dependents whose ownerReference.blockOwnerDeletion=true
+	// from the key-value store.  The API server will put the "foregroundDeletion"
+	// finalizer on the object, and sets its deletionTimestamp.  This policy is
+	// cascading, i.e., the dependents will be deleted with Foreground.
+	DeletePropagationForeground DeletionPropagation = "Foreground"
+)
+
+const (
+	// DryRunAll means to complete all processing stages, but don't
+	// persist changes to storage.
+	DryRunAll = "All"
+)
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DeleteOptions may be provided when deleting an API object.
+type DeleteOptions struct {
+	TypeMeta `json:",inline"`
+
+	// The duration in seconds before the object should be deleted. Value must be a non-negative integer.
+	// The value zero indicates delete immediately. If this value is nil, the default grace period for the
+	// specified type will be used.
+	// Defaults to a per-object value if not specified. Zero means delete immediately.
+	// +optional
+	GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
+
+	// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+	// returned.
+	// +k8s:conversion-gen=false
+	// +optional
+	Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
+
+	// Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7.
+	// Should the dependent objects be orphaned. If true/false, the "orphan"
+	// finalizer will be added to/removed from the object's finalizers list.
+	// Either this field or PropagationPolicy may be set, but not both.
+	// +optional
+	OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
+
+	// Whether and how garbage collection will be performed.
+	// Either this field or OrphanDependents may be set, but not both.
+	// The default policy is decided by the existing finalizer set in the
+	// metadata.finalizers and the resource-specific default policy.
+	// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+	// allow the garbage collector to delete the dependents in the background;
+	// 'Foreground' - a cascading policy that deletes all dependents in the
+	// foreground.
+	// +optional
+	PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+}
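A common combination is immediate deletion with foreground cascading, which the pointer fields make slightly verbose to construct. A sketch, not part of the vendored file:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	grace := int64(0)
	policy := metav1.DeletePropagationForeground

	// Delete immediately and cascade to dependents in the foreground.
	opts := metav1.DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
	}
	fmt.Printf("%+v\n", opts)
}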
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CreateOptions may be provided when creating an API object.
+type CreateOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+	// +k8s:deprecated=includeUninitialized,protobuf=2
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint.
+	// +optional
+	FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PatchOptions may be provided when patching an API object.
+// PatchOptions is meant to be a superset of UpdateOptions.
+type PatchOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+
+	// Force is going to "force" Apply requests. It means user will
+	// re-acquire conflicting fields owned by other people. Force
+	// flag must be unset for non-apply patch requests.
+	// +optional
+	Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"`
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// equal to 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint. This
+	// field is required for apply requests
+	// (application/apply-patch) but optional for non-apply patch
+	// types (JsonPatch, MergePatch, StrategicMergePatch).
+	// +optional
+	FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"`
+}
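+
+// Illustrative sketch (not part of upstream apimachinery): PatchOptions as a
+// server-side apply (application/apply-patch) request might set them. Force
+// re-acquires fields owned by other managers and must stay unset for
+// non-apply patches. The manager name is hypothetical.
+func examplePatchOptionsForApply() PatchOptions {
+	force := true
+	return PatchOptions{
+		FieldManager: "example-controller", // required for apply requests
+		Force:        &force,               // take ownership of conflicting fields
+	}
+}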
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// UpdateOptions may be provided when updating an API object.
+// All fields in UpdateOptions should also be present in PatchOptions.
+type UpdateOptions struct {
+	TypeMeta `json:",inline"`
+
+	// When present, indicates that modifications should not be
+	// persisted. An invalid or unrecognized dryRun directive will
+	// result in an error response and no further processing of the
+	// request. Valid values are:
+	// - All: all dry run stages will be processed
+	// +optional
+	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"`
+
+	// fieldManager is a name associated with the actor or entity
+	// that is making these changes. The value must be less than or
+	// equal to 128 characters long, and only contain printable characters,
+	// as defined by https://golang.org/pkg/unicode/#IsPrint.
+	// +optional
+	FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// Specifies the target ResourceVersion
+	// +optional
+	ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Status is a return value for calls that don't return other objects.
+type Status struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Status of the operation.
+	// One of: "Success" or "Failure".
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+	// A human-readable description of the status of this operation.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// A machine-readable description of why this operation is in the
+	// "Failure" status. If this value is empty there
+	// is no information available. A Reason clarifies an HTTP status
+	// code but does not override it.
+	// +optional
+	Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"`
+	// Extended data associated with the reason.  Each reason may define its
+	// own extended details. This field is optional and the data returned
+	// is not guaranteed to conform to any schema except that defined by
+	// the reason type.
+	// +optional
+	Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"`
+	// Suggested HTTP return code for this status, 0 if not set.
+	// +optional
+	Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"`
+}
+
+// StatusDetails is a set of additional properties that MAY be set by the
+// server to provide additional information about a response. The Reason
+// field of a Status object defines what attributes will be set. Clients
+// must ignore fields that do not match the defined type of each attribute,
+// and should assume that any attribute may be empty, invalid, or
+// under-defined.
+type StatusDetails struct {
+	// The name attribute of the resource associated with the status StatusReason
+	// (when there is a single name which can be described).
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// The group attribute of the resource associated with the status StatusReason.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+	// The kind attribute of the resource associated with the status StatusReason.
+	// On some operations may differ from the requested resource Kind.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"`
+	// UID of the resource.
+	// (when there is a single resource which can be described).
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// The Causes array includes more details associated with the StatusReason
+	// failure. Not all StatusReasons may provide detailed causes.
+	// +optional
+	Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"`
+	// If specified, the time in seconds before the operation should be retried. Some errors may indicate
+	// the client must take an alternate action - for those errors this field may indicate how long to wait
+	// before taking the alternate action.
+	// +optional
+	RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"`
+}
+
+// Values of Status.Status
+const (
+	StatusSuccess = "Success"
+	StatusFailure = "Failure"
+)
+
+// StatusReason is an enumeration of possible failure causes.  Each StatusReason
+// must map to a single HTTP status code, but multiple reasons may map
+// to the same HTTP status code.
+// TODO: move to apiserver
+type StatusReason string
+
+const (
+	// StatusReasonUnknown means the server has declined to indicate a specific reason.
+	// The details field may contain other information about this error.
+	// Status code 500.
+	StatusReasonUnknown StatusReason = ""
+
+	// StatusReasonUnauthorized means the server can be reached and understood the request, but requires
+	// the user to present appropriate authorization credentials (identified by the WWW-Authenticate header)
+	// in order for the action to be completed. If the user has specified credentials on the request, the
+	// server considers them insufficient.
+	// Status code 401
+	StatusReasonUnauthorized StatusReason = "Unauthorized"
+
+	// StatusReasonForbidden means the server can be reached and understood the request, but refuses
+	// to take any further action.  It is the result of the server being configured to deny access for some reason
+	// to the requested resource by the client.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the forbidden resource
+	//                   on some operations may differ from the requested
+	//                   resource.
+	//   "id"   string - the identifier of the forbidden resource
+	// Status code 403
+	StatusReasonForbidden StatusReason = "Forbidden"
+
+	// StatusReasonNotFound means one or more resources required for this operation
+	// could not be found.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the missing resource
+	//                   on some operations may differ from the requested
+	//                   resource.
+	//   "id"   string - the identifier of the missing resource
+	// Status code 404
+	StatusReasonNotFound StatusReason = "NotFound"
+
+	// StatusReasonAlreadyExists means the resource you are creating already exists.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the conflicting resource
+	//   "id"   string - the identifier of the conflicting resource
+	// Status code 409
+	StatusReasonAlreadyExists StatusReason = "AlreadyExists"
+
+	// StatusReasonConflict means the requested operation cannot be completed
+	// due to a conflict in the operation. The client may need to alter the
+	// request. Each resource may define custom details that indicate the
+	// nature of the conflict.
+	// Status code 409
+	StatusReasonConflict StatusReason = "Conflict"
+
+	// StatusReasonGone means the item is no longer available at the server and no
+	// forwarding address is known.
+	// Status code 410
+	StatusReasonGone StatusReason = "Gone"
+
+	// StatusReasonInvalid means the requested create or update operation cannot be
+	// completed due to invalid data provided as part of the request. The client may
+	// need to alter the request. When set, the client may use the StatusDetails
+	// message field as a summary of the issues encountered.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the invalid resource
+	//   "id"   string - the identifier of the invalid resource
+	//   "causes"      - one or more StatusCause entries indicating the data in the
+	//                   provided resource that was invalid.  The code, message, and
+	//                   field attributes will be set.
+	// Status code 422
+	StatusReasonInvalid StatusReason = "Invalid"
+
+	// StatusReasonServerTimeout means the server can be reached and understood the request,
+	// but cannot complete the action in a reasonable time. The client should retry the request.
+	// This may be due to temporary server load or a transient communication issue with
+	// another server. Status code 500 is used because the HTTP spec provides no suitable
+	// server-requested client retry and the 5xx class represents actionable errors.
+	// Details (optional):
+	//   "kind" string - the kind attribute of the resource being acted on.
+	//   "id"   string - the operation that is being attempted.
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 500
+	StatusReasonServerTimeout StatusReason = "ServerTimeout"
+
+	// StatusReasonTimeout means that the request could not be completed within the given time.
+	// Clients can get this response only when they specified a timeout param in the request,
+	// or if the server cannot complete the operation within a reasonable amount of time.
+	// The request might succeed with an increased value of timeout param. The client *should*
+	// wait at least the number of seconds specified by the retryAfterSeconds field.
+	// Details (optional):
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 504
+	StatusReasonTimeout StatusReason = "Timeout"
+
+	// StatusReasonTooManyRequests means the server experienced too many requests within a
+	// given window and that the client must wait to perform the action again. A client may
+	// always retry the request that led to this error, although the client should wait at least
+	// the number of seconds specified by the retryAfterSeconds field.
+	// Details (optional):
+	//   "retryAfterSeconds" int32 - the number of seconds before the operation should be retried
+	// Status code 429
+	StatusReasonTooManyRequests StatusReason = "TooManyRequests"
+
+	// StatusReasonBadRequest means that the request itself was invalid, because the request
+	// doesn't make any sense, for example deleting a read-only object.  This is different than
+	// StatusReasonInvalid above which indicates that the API call could possibly succeed, but the
+	// data was invalid.  API calls that return BadRequest can never succeed.
+	// Status code 400
+	StatusReasonBadRequest StatusReason = "BadRequest"
+
+	// StatusReasonMethodNotAllowed means that the action the client attempted to perform on the
+	// resource was not supported by the code - for instance, attempting to delete a resource that
+	// can only be created. API calls that return MethodNotAllowed can never succeed.
+	// Status code 405
+	StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed"
+
+	// StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable
+	// to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml.
+	// API calls that return NotAcceptable can never succeed.
+	// Status code 406
+	StatusReasonNotAcceptable StatusReason = "NotAcceptable"
+
+	// StatusReasonRequestEntityTooLarge means that the request entity is too large.
+	// Status code 413
+	StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge"
+
+	// StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable
+	// to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml.
+	// API calls that return UnsupportedMediaType can never succeed.
+	// Status code 415
+	StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType"
+
+	// StatusReasonInternalError indicates that an internal error occurred; it is unexpected
+	// and the outcome of the call is unknown.
+	// Details (optional):
+	//   "causes" - The original error
+	// Status code 500
+	StatusReasonInternalError StatusReason = "InternalError"
+
+	// StatusReasonExpired indicates that the request is invalid because the content you are requesting
+	// has expired and is no longer available. It is typically associated with watches that can't be
+	// serviced.
+	// Status code 410 (gone)
+	StatusReasonExpired StatusReason = "Expired"
+
+	// StatusReasonServiceUnavailable means that the request itself was valid,
+	// but the requested service is unavailable at this time.
+	// Retrying the request after some time might succeed.
+	// Status code 503
+	StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable"
+)
+
+// StatusCause provides more information about an api.Status failure, including
+// cases when multiple errors are encountered.
+type StatusCause struct {
+	// A machine-readable description of the cause of the error. If this value is
+	// empty there is no information available.
+	// +optional
+	Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"`
+	// A human-readable description of the cause of the error.  This field may be
+	// presented as-is to a reader.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+	// The field of the resource that has caused this error, as named by its JSON
+	// serialization. May include dot and postfix notation for nested attributes.
+	// Arrays are zero-indexed.  Fields may appear more than once in an array of
+	// causes due to fields having multiple errors.
+	// Optional.
+	//
+	// Examples:
+	//   "name" - the field "name" on the current resource
+	//   "items[0].name" - the field "name" on the first array entry in "items"
+	// +optional
+	Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"`
+}
+
+// CauseType is a machine readable value providing more detail about what
+// occurred in a status response. An operation may have multiple causes for a
+// status (whether Failure or Success).
+type CauseType string
+
+const (
+	// CauseTypeFieldValueNotFound is used to report failure to find a requested value
+	// (e.g. looking up an ID).
+	CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound"
+	// CauseTypeFieldValueRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).
+	CauseTypeFieldValueRequired CauseType = "FieldValueRequired"
+	// CauseTypeFieldValueDuplicate is used to report collisions of values that must be
+	// unique (e.g. unique IDs).
+	CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate"
+	// CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex
+	// match).
+	CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid"
+	// CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules)
+	// values that can not be handled (e.g. an enumerated string).
+	CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported"
+	// CauseTypeUnexpectedServerResponse is used to report when the server responded to the client
+	// without the expected return type. The presence of this cause indicates the error may be
+	// due to an intervening proxy or the server software malfunctioning.
+	CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse"
+	// CauseTypeFieldManagerConflict is used to report when another client claims to manage this field.
+	// It should only be returned for a request using server-side apply.
+	CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
+	// CauseTypeResourceVersionTooLarge is used to report that the requested resource version
+	// is newer than the data observed by the API server, so the request cannot be served.
+	CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
+)
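+
+// Illustrative sketch (not part of upstream apimachinery): how a server could
+// combine Status, StatusDetails and StatusCause to report a 422 for a create
+// request whose spec is missing a required field. The resource kind, name and
+// field are hypothetical.
+func exampleInvalidStatus() *Status {
+	return &Status{
+		Status:  StatusFailure,
+		Message: "Widget \"demo\" is invalid: spec.size: Required value",
+		Reason:  StatusReasonInvalid, // clarifies the suggested HTTP 422 below
+		Code:    422,
+		Details: &StatusDetails{
+			Name: "demo",
+			Kind: "Widget",
+			Causes: []StatusCause{{
+				Type:    CauseTypeFieldValueRequired,
+				Message: "Required value",
+				Field:   "spec.size",
+			}},
+		},
+	}
+}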
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of objects
+	Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// APIVersions lists the versions that are available, to allow clients to
+// discover the API at /api, which is the root path of the legacy v1 API.
+//
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type APIVersions struct {
+	TypeMeta `json:",inline"`
+	// versions are the api versions that are available.
+	Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"`
+	// a map of client CIDR to server address that is serving this group.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	// The server returns only those CIDRs that it thinks that the client can match.
+	// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+	// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIGroupList is a list of APIGroup, to allow clients to discover the API at
+// /apis.
+type APIGroupList struct {
+	TypeMeta `json:",inline"`
+	// groups is a list of APIGroup.
+	Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIGroup contains the name, the supported versions, and the preferred version
+// of a group.
+type APIGroup struct {
+	TypeMeta `json:",inline"`
+	// name is the name of the group.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// versions are the versions supported in this group.
+	Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"`
+	// preferredVersion is the version preferred by the API server, which
+	// probably is the storage version.
+	// +optional
+	PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"`
+	// a map of client CIDR to server address that is serving this group.
+	// This is to help clients reach servers in the most network-efficient way possible.
+	// Clients can use the appropriate server address as per the CIDR that they match.
+	// In case of multiple matches, clients should use the longest matching CIDR.
+	// The server returns only those CIDRs that it thinks that the client can match.
+	// For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP.
+	// Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.
+	// +optional
+	ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"`
+}
+
+// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.
+type ServerAddressByClientCIDR struct {
+	// The CIDR with which clients can match their IP to figure out the server address that they should use.
+	ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"`
+	// Address of this server, suitable for a client that matches the above CIDR.
+	// This can be a hostname, hostname:port, IP or IP:port.
+	ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"`
+}
+
+// GroupVersion contains the "group/version" and "version" string of a version.
+// It is made a struct to keep extensibility.
+type GroupVersionForDiscovery struct {
+	// groupVersion specifies the API group and version in the form "group/version"
+	GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
+	// version specifies the version in the form of "version". This is to save
+	// the clients the trouble of splitting the GroupVersion.
+	Version string `json:"version" protobuf:"bytes,2,opt,name=version"`
+}
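+
+// Illustrative sketch (not part of upstream apimachinery): the entry a server
+// could advertise at /apis for one API group that serves two versions, with
+// v1 preferred. The group and version names are hypothetical.
+func exampleAPIGroup() APIGroup {
+	return APIGroup{
+		Name: "example.dev",
+		Versions: []GroupVersionForDiscovery{
+			{GroupVersion: "example.dev/v1", Version: "v1"},
+			{GroupVersion: "example.dev/v1beta1", Version: "v1beta1"},
+		},
+		PreferredVersion: GroupVersionForDiscovery{GroupVersion: "example.dev/v1", Version: "v1"},
+	}
+}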
+
+// APIResource specifies the name of a resource and whether it is namespaced.
+type APIResource struct {
+	// name is the plural name of the resource.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// singularName is the singular name of the resource.  This allows clients to handle plural and singular opaquely.
+	// The singularName is more correct for reporting status on a single item and both singular and plural are allowed
+	// from the kubectl CLI interface.
+	SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"`
+	// namespaced indicates if a resource is namespaced or not.
+	Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"`
+	// group is the preferred group of the resource.  Empty implies the group of the containing resource list.
+	// For subresources, this may have a different value, for example: "Scale".
+	Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"`
+	// version is the preferred version of the resource.  Empty implies the version of the containing resource list
+	// For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)".
+	Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"`
+	// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
+	Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"`
+	// verbs is a list of supported kube verbs (this includes get, list, watch, create,
+	// update, patch, delete, deletecollection, and proxy)
+	Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"`
+	// shortNames is a list of suggested short names of the resource.
+	ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"`
+	// categories is a list of the grouped resources this resource belongs to (e.g. 'all')
+	Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"`
+	// The hash value of the storage version, the version this resource is
+	// converted to when written to the data store. Value must be treated
+	// as opaque by clients. Only equality comparison on the value is valid.
+	// This is an alpha feature and may change or be removed in the future.
+	// The field is populated by the apiserver only if the
+	// StorageVersionHash feature gate is enabled.
+	// This field will remain optional even if it graduates.
+	// +optional
+	StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"`
+}
+
+// Verbs masks the value so protobuf can generate
+//
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Verbs []string
+
+func (vs Verbs) String() string {
+	return fmt.Sprintf("%v", []string(vs))
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIResourceList is a list of APIResource; it is used to expose the name of the
+// resources supported in a specific group and version, and whether the resource
+// is namespaced.
+type APIResourceList struct {
+	TypeMeta `json:",inline"`
+	// groupVersion is the group and version this APIResourceList is for.
+	GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"`
+	// resources contains the name of the resources and if they are namespaced.
+	APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"`
+}
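+
+// Illustrative sketch (not part of upstream apimachinery): the discovery
+// document a server could return for a single namespaced resource in one
+// group/version. The group, resource and short names are hypothetical.
+func exampleDiscovery() APIResourceList {
+	return APIResourceList{
+		GroupVersion: "example.dev/v1",
+		APIResources: []APIResource{{
+			Name:         "widgets",
+			SingularName: "widget",
+			Namespaced:   true,
+			Kind:         "Widget",
+			Verbs:        Verbs{"get", "list", "watch", "create", "update", "patch", "delete"},
+			ShortNames:   []string{"wd"},
+		}},
+	}
+}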
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/apis".
+type RootPaths struct {
+	// paths are the paths available at root.
+	Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"`
+}
+
+// TODO: remove me when watch is refactored
+func LabelSelectorQueryParam(version string) string {
+	return "labelSelector"
+}
+
+// TODO: remove me when watch is refactored
+func FieldSelectorQueryParam(version string) string {
+	return "fieldSelector"
+}
+
+// String returns available api versions as a human-friendly version string.
+func (apiVersions APIVersions) String() string {
+	return strings.Join(apiVersions.Versions, ",")
+}
+
+func (apiVersions APIVersions) GoString() string {
+	return apiVersions.String()
+}
+
+// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.
+type Patch struct{}
+
+// Note:
+// There are two different styles of label selectors used in versioned types:
+// an older style which is represented as just a string in versioned types, and a
+// newer style that is structured.  LabelSelector is an internal representation for the
+// latter style.
+
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
+type LabelSelector struct {
+	// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+	// map is equivalent to an element of matchExpressions, whose key field is "key", the
+	// operator is "In", and the values array contains only "value". The requirements are ANDed.
+	// +optional
+	MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+	// matchExpressions is a list of label selector requirements. The requirements are ANDed.
+	// +optional
+	MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"`
+}
+
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
+type LabelSelectorRequirement struct {
+	// key is the label key that the selector applies to.
+	// +patchMergeKey=key
+	// +patchStrategy=merge
+	Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
+	// operator represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists and DoesNotExist.
+	Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
+	// values is an array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. This array is replaced during a strategic
+	// merge patch.
+	// +optional
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A label selector operator is the set of operators that can be used in a selector requirement.
+type LabelSelectorOperator string
+
+const (
+	LabelSelectorOpIn           LabelSelectorOperator = "In"
+	LabelSelectorOpNotIn        LabelSelectorOperator = "NotIn"
+	LabelSelectorOpExists       LabelSelectorOperator = "Exists"
+	LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist"
+)
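+
+// Illustrative sketch (not part of upstream apimachinery): a structured
+// selector equivalent to the string form "app=web,tier notin (debug)".
+// matchLabels and matchExpressions are ANDed together; the label keys and
+// values are hypothetical.
+func exampleLabelSelector() *LabelSelector {
+	return &LabelSelector{
+		MatchLabels: map[string]string{"app": "web"},
+		MatchExpressions: []LabelSelectorRequirement{{
+			Key:      "tier",
+			Operator: LabelSelectorOpNotIn,
+			Values:   []string{"debug"},
+		}},
+	}
+}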
+
+// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
+// that the fieldset applies to.
+type ManagedFieldsEntry struct {
+	// Manager is an identifier of the workflow managing these fields.
+	Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"`
+	// Operation is the type of operation which led to this ManagedFieldsEntry being created.
+	// The only valid values for this field are 'Apply' and 'Update'.
+	Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"`
+	// APIVersion defines the version of this resource that this field set
+	// applies to. The format is "group/version" just like the top-level
+	// APIVersion field. It is necessary to track the version of a field
+	// set because it cannot be automatically converted.
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"`
+	// Time is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'.
+	// +optional
+	Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"`
+
+	// Fields is tombstoned to show why 5 is a reserved protobuf tag.
+	//Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"`
+
+	// FieldsType is the discriminator for the different fields format and version.
+	// There is currently only one possible value: "FieldsV1"
+	FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"`
+	// FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
+	// +optional
+	FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"`
+}
+
+// ManagedFieldsOperationType is the type of operation which led to a ManagedFieldsEntry being created.
+type ManagedFieldsOperationType string
+
+const (
+	ManagedFieldsOperationApply  ManagedFieldsOperationType = "Apply"
+	ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update"
+)
+
+// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.
+//
+// Each key is either a '.' representing the field itself, and will always map to an empty set,
+// or a string representing a sub-field or item. The string will follow one of these four formats:
+// 'f:<name>', where <name> is the name of a field in a struct, or key in a map
+// 'v:<value>', where <value> is the exact json formatted value of a list item
+// 'i:<index>', where <index> is the position of an item in a list
+// 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values
+// If a key maps to an empty Fields value, the field that key represents is part of the set.
+//
+// The exact format is defined in sigs.k8s.io/structured-merge-diff
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type FieldsV1 struct {
+	// Raw is the underlying serialization of this object.
+	Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"`
+}
+
+func (f FieldsV1) String() string {
+	return string(f.Raw)
+}
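+
+// Illustrative sketch (not part of upstream apimachinery): a managed-fields
+// entry as a server-side apply request could record it. The raw FieldsV1
+// payload uses the 'f:<name>' key format described above; Time is left empty
+// because the Operation is 'Apply'. The manager name and field are
+// hypothetical.
+func exampleManagedFieldsEntry() ManagedFieldsEntry {
+	return ManagedFieldsEntry{
+		Manager:    "example-controller",
+		Operation:  ManagedFieldsOperationApply,
+		APIVersion: "example.dev/v1",
+		FieldsType: "FieldsV1",
+		FieldsV1:   &FieldsV1{Raw: []byte(`{"f:spec":{"f:size":{}}}`)},
+	}
+}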
+
+// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf
+//   generation to support a meta type that can accept any valid JSON. This can be introduced
+//   in a v1 because clients a) receive an error if they try to access proto today, and b)
+//   once introduced they would be able to gracefully switch over to using it.
+
+// Table is a tabular representation of a set of API resources. The server transforms the
+// object into a set of preferred columns for quickly reviewing the objects.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=false
+type Table struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty"`
+
+	// columnDefinitions describes each column in the returned items array. The number of cells per row
+	// will always match the number of column definitions.
+	ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"`
+	// rows is the list of items in the table.
+	Rows []TableRow `json:"rows"`
+}
+
+// TableColumnDefinition contains information about a column returned in the Table.
+// +protobuf=false
+type TableColumnDefinition struct {
+	// name is a human readable name for the column.
+	Name string `json:"name"`
+	// type is an OpenAPI type definition for this column, such as number, integer, string, or
+	// array.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.
+	Type string `json:"type"`
+	// format is an optional OpenAPI type modifier for this column. A format modifies the type and
+	// imposes additional rules, like date or time formatting for a string. The 'name' format is applied
+	// to the primary identifier column, which has type 'string', to assist clients in identifying the
+	// column that holds the resource name.
+	// See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.
+	Format string `json:"format"`
+	// description is a human readable description of this column.
+	Description string `json:"description"`
+	// priority is an integer defining the relative importance of this column compared to others. Lower
+	// numbers are considered higher priority. Columns that may be omitted in limited space scenarios
+	// should be given a higher priority.
+	Priority int32 `json:"priority"`
+}
+
+// TableRow is an individual row in a table.
+// +protobuf=false
+type TableRow struct {
+	// cells will be as wide as the column definitions array and may contain strings, numbers (float64 or
+	// int64), booleans, simple maps, lists, or null. See the type field of the column definition for a
+	// more detailed description.
+	Cells []interface{} `json:"cells"`
+	// conditions describe additional status of a row that are relevant for a human user. These conditions
+	// apply to the row, not to the object, and will be specific to table output. The only defined
+	// condition type is 'Completed', for a row that indicates a resource that has run to completion and
+	// can be given less visual priority.
+	// +optional
+	Conditions []TableRowCondition `json:"conditions,omitempty"`
+	// This field contains the requested additional information about each object based on the includeObject
+	// policy when requesting the Table. If "None", this field is empty, if "Object" this will be the
+	// default serialization of the object for the current API version, and if "Metadata" (the default) will
+	// contain the object metadata. Check the returned kind and apiVersion of the object before parsing.
+	// The media type of the object will always match the enclosing list - if this is a JSON table, these
+	// will be JSON encoded objects.
+	// +optional
+	Object runtime.RawExtension `json:"object,omitempty"`
+}
+
+// TableRowCondition allows a row to be marked with additional information.
+// +protobuf=false
+type TableRowCondition struct {
+	// Type of row condition. The only defined value is 'Completed' indicating that the
+	// object this row represents has reached a completed state and may be given less visual
+	// priority than other rows. Clients are not required to honor any conditions but should
+	// be consistent where possible about handling the conditions.
+	Type RowConditionType `json:"type"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status"`
+	// (brief) machine readable reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// Human readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+type RowConditionType string
+
+// These are valid conditions of a row. This list is not exhaustive and new conditions may be
+// included by other resources.
+const (
+	// RowCompleted means the underlying resource has reached completion and may be given less
+	// visual priority than other resources.
+	RowCompleted RowConditionType = "Completed"
+)
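+
+// Illustrative sketch (not part of upstream apimachinery): a minimal Table a
+// server could return for two objects, with the second row marked Completed
+// so clients may de-emphasize it. Column and cell values are hypothetical.
+func exampleTable() *Table {
+	return &Table{
+		ColumnDefinitions: []TableColumnDefinition{
+			{Name: "Name", Type: "string", Format: "name", Description: "resource name"},
+			{Name: "Age", Type: "string", Description: "time since creation"},
+		},
+		Rows: []TableRow{
+			{Cells: []interface{}{"widget-a", "5m"}},
+			{
+				Cells:      []interface{}{"widget-b", "2h"},
+				Conditions: []TableRowCondition{{Type: RowCompleted, Status: ConditionTrue}},
+			},
+		},
+	}
+}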
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// IncludeObjectPolicy controls which portion of the object is returned with a Table.
+type IncludeObjectPolicy string
+
+const (
+	// IncludeNone returns no object.
+	IncludeNone IncludeObjectPolicy = "None"
+	// IncludeMetadata serializes the object containing only its metadata field.
+	IncludeMetadata IncludeObjectPolicy = "Metadata"
+	// IncludeObject contains the full object.
+	IncludeObject IncludeObjectPolicy = "Object"
+)
+
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type TableOptions struct {
+	TypeMeta `json:",inline"`
+
+	// NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions
+	// and may be removed as a field in a future release.
+	NoHeaders bool `json:"-"`
+
+	// includeObject decides whether to include each object along with its columnar information.
+	// Specifying "None" will return no object, specifying "Object" will return the full object contents, and
+	// specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind
+	// in version v1beta1 of the meta.k8s.io API group.
+	IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"`
+}
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadata struct {
+	TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+}
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadataList struct {
+	TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items contains each of the included items.
+	Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Condition contains details for one aspect of the current state of this API Resource.
+// ---
+// This struct is intended for direct use as an array at the field path .status.conditions.  For example,
+// type FooStatus struct{
+//     // Represents the observations of a foo's current state.
+//     // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+//     // +patchMergeKey=type
+//     // +patchStrategy=merge
+//     // +listType=map
+//     // +listMapKey=type
+//     Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+//
+//     // other fields
+// }
+type Condition struct {
+	// type of condition in CamelCase or in foo.example.com/CamelCase.
+	// ---
+	// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+	// useful (see .node.status.conditions), the ability to deconflict is important.
+	// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+	// +kubebuilder:validation:MaxLength=316
+	Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+	// status of the condition, one of True, False, Unknown.
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum=True;False;Unknown
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"`
+	// observedGeneration represents the .metadata.generation that the condition was set based upon.
+	// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+	// with respect to the current state of the instance.
+	// +optional
+	// +kubebuilder:validation:Minimum=0
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+	// lastTransitionTime is the last time the condition transitioned from one status to another.
+	// This should be when the underlying condition changed.  If that is not known, then using the time when the API field changed is acceptable.
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:validation:Format=date-time
+	LastTransitionTime Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// reason contains a programmatic identifier indicating the reason for the condition's last transition.
+	// Producers of specific condition types may define expected values and meanings for this field,
+	// and whether the values are considered a guaranteed API.
+	// The value should be a CamelCase string.
+	// This field may not be empty.
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength=1024
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:Pattern=`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`
+	Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"`
+	// message is a human readable message indicating details about the transition.
+	// This may be an empty string.
+	// +required
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength=32768
+	Message string `json:"message" protobuf:"bytes,6,opt,name=message"`
+}
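+
+// Illustrative sketch (not part of upstream apimachinery): a Condition a
+// controller could append to .status.conditions once its workload becomes
+// available. The type, reason and message are hypothetical; a real controller
+// sets LastTransitionTime to the moment the status actually changed.
+func exampleAvailableCondition(observedGeneration int64) Condition {
+	return Condition{
+		Type:               "Available",
+		Status:             ConditionTrue,
+		ObservedGeneration: observedGeneration,
+		LastTransitionTime: Time{}, // placeholder; set to the transition time
+		Reason:             "MinimumReplicasAvailable",
+		Message:            "Deployment has minimum availability.",
+	}
+}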
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..ace0abf
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -0,0 +1,457 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_APIGroup = map[string]string{
+	"":                           "APIGroup contains the name, the supported versions, and the preferred version of a group.",
+	"name":                       "name is the name of the group.",
+	"versions":                   "versions are the versions supported in this group.",
+	"preferredVersion":           "preferredVersion is the version preferred by the API server, which probably is the storage version.",
+	"serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+}
+
+func (APIGroup) SwaggerDoc() map[string]string {
+	return map_APIGroup
+}
+
+var map_APIGroupList = map[string]string{
+	"":       "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
+	"groups": "groups is a list of APIGroup.",
+}
+
+func (APIGroupList) SwaggerDoc() map[string]string {
+	return map_APIGroupList
+}
+
+var map_APIResource = map[string]string{
+	"":                   "APIResource specifies the name of a resource and whether it is namespaced.",
+	"name":               "name is the plural name of the resource.",
+	"singularName":       "singularName is the singular name of the resource.  This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
+	"namespaced":         "namespaced indicates if a resource is namespaced or not.",
+	"group":              "group is the preferred group of the resource.  Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
+	"version":            "version is the preferred version of the resource.  Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
+	"kind":               "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
+	"verbs":              "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
+	"shortNames":         "shortNames is a list of suggested short names of the resource.",
+	"categories":         "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
+	"storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
+}
+
+func (APIResource) SwaggerDoc() map[string]string {
+	return map_APIResource
+}
+
+var map_APIResourceList = map[string]string{
+	"":             "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
+	"groupVersion": "groupVersion is the group and version this APIResourceList is for.",
+	"resources":    "resources contains the name of the resources and if they are namespaced.",
+}
+
+func (APIResourceList) SwaggerDoc() map[string]string {
+	return map_APIResourceList
+}
+
+var map_APIVersions = map[string]string{
+	"":                           "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.",
+	"versions":                   "versions are the api versions that are available.",
+	"serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
+}
+
+func (APIVersions) SwaggerDoc() map[string]string {
+	return map_APIVersions
+}
+
+var map_Condition = map[string]string{
+	"":                   "Condition contains details for one aspect of the current state of this API Resource.",
+	"type":               "type of condition in CamelCase or in foo.example.com/CamelCase.",
+	"status":             "status of the condition, one of True, False, Unknown.",
+	"observedGeneration": "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
+	"lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed.  If that is not known, then using the time when the API field changed is acceptable.",
+	"reason":             "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.",
+	"message":            "message is a human readable message indicating details about the transition. This may be an empty string.",
+}
+
+func (Condition) SwaggerDoc() map[string]string {
+	return map_Condition
+}
+
+var map_CreateOptions = map[string]string{
+	"":             "CreateOptions may be provided when creating an API object.",
+	"dryRun":       "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+	"fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+}
+
+func (CreateOptions) SwaggerDoc() map[string]string {
+	return map_CreateOptions
+}
+
+var map_DeleteOptions = map[string]string{
+	"":                   "DeleteOptions may be provided when deleting an API object.",
+	"gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+	"preconditions":      "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
+	"orphanDependents":   "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+	"propagationPolicy":  "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+	"dryRun":             "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+}
+
+func (DeleteOptions) SwaggerDoc() map[string]string {
+	return map_DeleteOptions
+}
+
+var map_ExportOptions = map[string]string{
+	"":       "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.",
+	"export": "Should this value be exported.  Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.",
+	"exact":  "Should the export be exact.  Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.",
+}
+
+func (ExportOptions) SwaggerDoc() map[string]string {
+	return map_ExportOptions
+}
+
+var map_FieldsV1 = map[string]string{
+	"": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of  a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
+}
+
+func (FieldsV1) SwaggerDoc() map[string]string {
+	return map_FieldsV1
+}
+
+var map_GetOptions = map[string]string{
+	"":                "GetOptions is the standard query options to the standard REST get call.",
+	"resourceVersion": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+}
+
+func (GetOptions) SwaggerDoc() map[string]string {
+	return map_GetOptions
+}
+
+var map_GroupVersionForDiscovery = map[string]string{
+	"":             "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
+	"groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"",
+	"version":      "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
+}
+
+func (GroupVersionForDiscovery) SwaggerDoc() map[string]string {
+	return map_GroupVersionForDiscovery
+}
+
+var map_LabelSelector = map[string]string{
+	"":                 "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
+	"matchLabels":      "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
+	"matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
+}
+
+func (LabelSelector) SwaggerDoc() map[string]string {
+	return map_LabelSelector
+}
+
+var map_LabelSelectorRequirement = map[string]string{
+	"":         "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+	"key":      "key is the label key that the selector applies to.",
+	"operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
+	"values":   "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (LabelSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_LabelSelectorRequirement
+}
+
+var map_List = map[string]string{
+	"":         "List holds a list of objects, which may not be known by the server.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of objects",
+}
+
+func (List) SwaggerDoc() map[string]string {
+	return map_List
+}
+
+var map_ListMeta = map[string]string{
+	"":                   "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
+	"selfLink":           "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
+	"resourceVersion":    "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+	"continue":           "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
+	"remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.",
+}
+
+func (ListMeta) SwaggerDoc() map[string]string {
+	return map_ListMeta
+}
+
+var map_ListOptions = map[string]string{
+	"":                     "ListOptions is the query options to a standard REST list call.",
+	"labelSelector":        "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+	"fieldSelector":        "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+	"watch":                "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+	"allowWatchBookmarks":  "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+	"resourceVersion":      "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+	"resourceVersionMatch": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+	"timeoutSeconds":       "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+	"limit":                "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+	"continue":             "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+}
+
+func (ListOptions) SwaggerDoc() map[string]string {
+	return map_ListOptions
+}
+
+var map_ManagedFieldsEntry = map[string]string{
+	"":           "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
+	"manager":    "Manager is an identifier of the workflow managing these fields.",
+	"operation":  "Operation is the type of operation which led to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
+	"apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
+	"time":       "Time is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'.",
+	"fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
+	"fieldsV1":   "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
+}
+
+func (ManagedFieldsEntry) SwaggerDoc() map[string]string {
+	return map_ManagedFieldsEntry
+}
+
+var map_ObjectMeta = map[string]string{
+	"":                           "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+	"name":                       "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"generateName":               "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+	"namespace":                  "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
+	"selfLink":                   "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
+	"uid":                        "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+	"resourceVersion":            "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+	"generation":                 "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
+	"creationTimestamp":          "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"deletionTimestamp":          "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
+	"labels":                     "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
+	"annotations":                "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
+	"ownerReferences":            "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+	"finalizers":                 "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order.  Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
+	"clusterName":                "The name of the cluster which the object belongs to. This is used to distinguish resources with the same name and namespace in different clusters. This field is not set anywhere right now and the apiserver is going to ignore it if set in a create or update request.",
+	"managedFields":              "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
+}
+
+func (ObjectMeta) SwaggerDoc() map[string]string {
+	return map_ObjectMeta
+}
+
+var map_OwnerReference = map[string]string{
+	"":                   "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
+	"apiVersion":         "API version of the referent.",
+	"kind":               "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"name":               "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
+	"uid":                "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+	"controller":         "If true, this reference points to the managing controller.",
+	"blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
+}
+
+func (OwnerReference) SwaggerDoc() map[string]string {
+	return map_OwnerReference
+}
+
+var map_PartialObjectMetadata = map[string]string{
+	"":         "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (PartialObjectMetadata) SwaggerDoc() map[string]string {
+	return map_PartialObjectMetadata
+}
+
+var map_PartialObjectMetadataList = map[string]string{
+	"":         "PartialObjectMetadataList contains a list of objects containing only their metadata",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "items contains each of the included items.",
+}
+
+func (PartialObjectMetadataList) SwaggerDoc() map[string]string {
+	return map_PartialObjectMetadataList
+}
+
+var map_Patch = map[string]string{
+	"": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
+}
+
+func (Patch) SwaggerDoc() map[string]string {
+	return map_Patch
+}
+
+var map_PatchOptions = map[string]string{
+	"":             "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.",
+	"dryRun":       "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+	"force":        "Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+	"fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+}
+
+func (PatchOptions) SwaggerDoc() map[string]string {
+	return map_PatchOptions
+}
+
+var map_Preconditions = map[string]string{
+	"":                "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+	"uid":             "Specifies the target UID.",
+	"resourceVersion": "Specifies the target ResourceVersion",
+}
+
+func (Preconditions) SwaggerDoc() map[string]string {
+	return map_Preconditions
+}
+
+var map_RootPaths = map[string]string{
+	"":      "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".",
+	"paths": "paths are the paths available at root.",
+}
+
+func (RootPaths) SwaggerDoc() map[string]string {
+	return map_RootPaths
+}
+
+var map_ServerAddressByClientCIDR = map[string]string{
+	"":              "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
+	"clientCIDR":    "The CIDR with which clients can match their IP to figure out the server address that they should use.",
+	"serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
+}
+
+func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string {
+	return map_ServerAddressByClientCIDR
+}
+
+var map_Status = map[string]string{
+	"":         "Status is a return value for calls that don't return other objects.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"status":   "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"message":  "A human-readable description of the status of this operation.",
+	"reason":   "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
+	"details":  "Extended data associated with the reason.  Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
+	"code":     "Suggested HTTP return code for this status, 0 if not set.",
+}
+
+func (Status) SwaggerDoc() map[string]string {
+	return map_Status
+}
+
+var map_StatusCause = map[string]string{
+	"":        "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
+	"reason":  "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
+	"message": "A human-readable description of the cause of the error.  This field may be presented as-is to a reader.",
+	"field":   "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed.  Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n  \"name\" - the field \"name\" on the current resource\n  \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
+}
+
+func (StatusCause) SwaggerDoc() map[string]string {
+	return map_StatusCause
+}
+
+var map_StatusDetails = map[string]string{
+	"":                  "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
+	"name":              "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
+	"group":             "The group attribute of the resource associated with the status StatusReason.",
+	"kind":              "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"uid":               "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
+	"causes":            "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
+	"retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
+}
+
+func (StatusDetails) SwaggerDoc() map[string]string {
+	return map_StatusDetails
+}
+
+var map_Table = map[string]string{
+	"":                  "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.",
+	"metadata":          "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.",
+	"rows":              "rows is the list of items in the table.",
+}
+
+func (Table) SwaggerDoc() map[string]string {
+	return map_Table
+}
+
+var map_TableColumnDefinition = map[string]string{
+	"":            "TableColumnDefinition contains information about a column returned in the Table.",
+	"name":        "name is a human readable name for the column.",
+	"type":        "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
+	"format":      "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
+	"description": "description is a human readable description of this column.",
+	"priority":    "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.",
+}
+
+func (TableColumnDefinition) SwaggerDoc() map[string]string {
+	return map_TableColumnDefinition
+}
+
+var map_TableOptions = map[string]string{
+	"":              "TableOptions are used when a Table is requested by the caller.",
+	"includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.",
+}
+
+func (TableOptions) SwaggerDoc() map[string]string {
+	return map_TableOptions
+}
+
+var map_TableRow = map[string]string{
+	"":           "TableRow is an individual row in a table.",
+	"cells":      "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.",
+	"conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.",
+	"object":     "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this is a JSON table, these will be JSON encoded objects.",
+}
+
+func (TableRow) SwaggerDoc() map[string]string {
+	return map_TableRow
+}
+
+var map_TableRowCondition = map[string]string{
+	"":        "TableRowCondition allows a row to be marked with additional information.",
+	"type":    "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.",
+	"status":  "Status of the condition, one of True, False, Unknown.",
+	"reason":  "(brief) machine readable reason for the condition's last transition.",
+	"message": "Human readable message indicating details about last transition.",
+}
+
+func (TableRowCondition) SwaggerDoc() map[string]string {
+	return map_TableRowCondition
+}
+
+var map_TypeMeta = map[string]string{
+	"":           "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
+	"kind":       "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+}
+
+func (TypeMeta) SwaggerDoc() map[string]string {
+	return map_TypeMeta
+}
+
+var map_UpdateOptions = map[string]string{
+	"":             "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.",
+	"dryRun":       "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+	"fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+}
+
+func (UpdateOptions) SwaggerDoc() map[string]string {
+	return map_UpdateOptions
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
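
Editorial note: a minimal sketch, not part of the vendored file, of how these generated SwaggerDoc maps are typically consumed. The empty-string key documents the type itself and every other key documents a JSON field; the helper function name below is hypothetical.

    // assumed imports: "fmt", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    func printDeleteOptionsDocs() {
    	docs := metav1.DeleteOptions{}.SwaggerDoc() // field name -> description
    	fmt.Println("type:", docs[""])              // "" documents the type itself
    	fmt.Println("dryRun:", docs["dryRun"])      // per-field documentation
    }
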
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
new file mode 100644
index 0000000..54a231e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -0,0 +1,508 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+	gojson "encoding/json"
+	"fmt"
+	"io"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/klog/v2"
+)
+
+// NestedFieldCopy returns a deep copy of the value of a nested field.
+// Returns false if the value is missing.
+// No error is returned for a nil field.
+//
+// Note: fields passed to this function are treated as keys within the passed
+// object; no array/slice syntax is supported.
+func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	return runtime.DeepCopyJSONValue(val), true, nil
+}
+
+// NestedFieldNoCopy returns a reference to a nested field.
+// Returns false if value is not found and an error if unable
+// to traverse obj.
+//
+// Note: fields passed to this function are treated as keys within the passed
+// object; no array/slice syntax is supported.
+func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) {
+	var val interface{} = obj
+
+	for i, field := range fields {
+		if val == nil {
+			return nil, false, nil
+		}
+		if m, ok := val.(map[string]interface{}); ok {
+			val, ok = m[field]
+			if !ok {
+				return nil, false, nil
+			}
+		} else {
+			return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val)
+		}
+	}
+	return val, true, nil
+}
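
Editorial note: a minimal usage sketch, not part of the vendored file. It reads a nested value out of an arbitrary object map; the object literal and field names are hypothetical, and NestedFieldCopy would return a deep copy instead of a reference.

    // assumed imports: "fmt", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    func exampleNestedRead() {
    	obj := map[string]interface{}{
    		"spec": map[string]interface{}{"replicas": int64(3)},
    	}
    	val, found, err := unstructured.NestedFieldNoCopy(obj, "spec", "replicas")
    	if err == nil && found {
    		fmt.Println(val) // 3 -- still a reference into obj
    	}
    }
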
+
+// NestedString returns the string value of a nested field.
+// Returns false if value is not found and an error if not a string.
+func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return "", found, err
+	}
+	s, ok := val.(string)
+	if !ok {
+		return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val)
+	}
+	return s, true, nil
+}
+
+// NestedBool returns the bool value of a nested field.
+// Returns false if value is not found and an error if not a bool.
+func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return false, found, err
+	}
+	b, ok := val.(bool)
+	if !ok {
+		return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val)
+	}
+	return b, true, nil
+}
+
+// NestedFloat64 returns the float64 value of a nested field.
+// Returns false if value is not found and an error if not a float64.
+func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return 0, found, err
+	}
+	f, ok := val.(float64)
+	if !ok {
+		return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val)
+	}
+	return f, true, nil
+}
+
+// NestedInt64 returns the int64 value of a nested field.
+// Returns false if value is not found and an error if not an int64.
+func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return 0, found, err
+	}
+	i, ok := val.(int64)
+	if !ok {
+		return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val)
+	}
+	return i, true, nil
+}
+
+// NestedStringSlice returns a copy of []string value of a nested field.
+// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
+func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	m, ok := val.([]interface{})
+	if !ok {
+		return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
+	}
+	strSlice := make([]string, 0, len(m))
+	for _, v := range m {
+		if str, ok := v.(string); ok {
+			strSlice = append(strSlice, str)
+		} else {
+			return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v)
+		}
+	}
+	return strSlice, true, nil
+}
+
+// NestedSlice returns a deep copy of []interface{} value of a nested field.
+// Returns false if value is not found and an error if not a []interface{}.
+func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	_, ok := val.([]interface{})
+	if !ok {
+		return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val)
+	}
+	return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil
+}
+
+// NestedStringMap returns a copy of map[string]string value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map.
+func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) {
+	m, found, err := nestedMapNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	strMap := make(map[string]string, len(m))
+	for k, v := range m {
+		if str, ok := v.(string); ok {
+			strMap[k] = str
+		} else {
+			return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map: %v is of the type %T, expected string", jsonPath(fields), v, v)
+		}
+	}
+	return strMap, true, nil
+}
+
+// NestedMap returns a deep copy of map[string]interface{} value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{}.
+func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+	m, found, err := nestedMapNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	return runtime.DeepCopyJSON(m), true, nil
+}
+
+// nestedMapNoCopy returns a map[string]interface{} value of a nested field.
+// Returns false if value is not found and an error if not a map[string]interface{}.
+func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return nil, found, err
+	}
+	m, ok := val.(map[string]interface{})
+	if !ok {
+		return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val)
+	}
+	return m, true, nil
+}
+
+// SetNestedField sets the value of a nested field to a deep copy of the value provided.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error {
+	return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...)
+}
+
+func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error {
+	m := obj
+
+	for i, field := range fields[:len(fields)-1] {
+		if val, ok := m[field]; ok {
+			if valMap, ok := val.(map[string]interface{}); ok {
+				m = valMap
+			} else {
+				return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1]))
+			}
+		} else {
+			newVal := make(map[string]interface{})
+			m[field] = newVal
+			m = newVal
+		}
+	}
+	m[fields[len(fields)-1]] = value
+	return nil
+}
+
+// SetNestedStringSlice sets the string slice value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error {
+	m := make([]interface{}, 0, len(value)) // convert []string into []interface{}
+	for _, v := range value {
+		m = append(m, v)
+	}
+	return setNestedFieldNoCopy(obj, m, fields...)
+}
+
+// SetNestedSlice sets the slice value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error {
+	return SetNestedField(obj, value, fields...)
+}
+
+// SetNestedStringMap sets the map[string]string value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error {
+	m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{}
+	for k, v := range value {
+		m[k] = v
+	}
+	return setNestedFieldNoCopy(obj, m, fields...)
+}
+
+// SetNestedMap sets the map[string]interface{} value of a nested field.
+// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}.
+func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error {
+	return SetNestedField(obj, value, fields...)
+}
+
+// RemoveNestedField removes the nested field from the obj.
+func RemoveNestedField(obj map[string]interface{}, fields ...string) {
+	m := obj
+	for _, field := range fields[:len(fields)-1] {
+		if x, ok := m[field].(map[string]interface{}); ok {
+			m = x
+		} else {
+			return
+		}
+	}
+	delete(m, fields[len(fields)-1])
+}
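
Editorial note: a small sketch, not part of the vendored file, showing that the setters create any missing intermediate maps and that RemoveNestedField quietly returns when a level is absent. The keys and values used are hypothetical.

    // assumed import: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    func exampleNestedWrite() {
    	obj := map[string]interface{}{}
    	// creates obj["metadata"]["labels"]["app"] = "bbsim"
    	_ = unstructured.SetNestedField(obj, "bbsim", "metadata", "labels", "app")
    	// deletes only the "app" key; the "metadata" and "labels" maps remain
    	unstructured.RemoveNestedField(obj, "metadata", "labels", "app")
    }
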
+
+func getNestedString(obj map[string]interface{}, fields ...string) string {
+	val, found, err := NestedString(obj, fields...)
+	if !found || err != nil {
+		return ""
+	}
+	return val
+}
+
+func getNestedInt64(obj map[string]interface{}, fields ...string) int64 {
+	val, found, err := NestedInt64(obj, fields...)
+	if !found || err != nil {
+		return 0
+	}
+	return val
+}
+
+func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 {
+	val, found, err := NestedInt64(obj, fields...)
+	if !found || err != nil {
+		return nil
+	}
+	return &val
+}
+
+func jsonPath(fields []string) string {
+	return "." + strings.Join(fields, ".")
+}
+
+func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference {
+	// Though this field is a *bool, when decoded from JSON it's
+	// unmarshalled as a bool.
+	var controllerPtr *bool
+	if controller, found, err := NestedBool(v, "controller"); err == nil && found {
+		controllerPtr = &controller
+	}
+	var blockOwnerDeletionPtr *bool
+	if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found {
+		blockOwnerDeletionPtr = &blockOwnerDeletion
+	}
+	return metav1.OwnerReference{
+		Kind:               getNestedString(v, "kind"),
+		Name:               getNestedString(v, "name"),
+		APIVersion:         getNestedString(v, "apiVersion"),
+		UID:                types.UID(getNestedString(v, "uid")),
+		Controller:         controllerPtr,
+		BlockOwnerDeletion: blockOwnerDeletionPtr,
+	}
+}
+
+// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured
+// type, which can be used for generic access to objects without a predefined scheme.
+// TODO: move into serializer/json.
+var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{}
+
+type unstructuredJSONScheme struct{}
+
+const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON"
+
+func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	var err error
+	if obj != nil {
+		err = s.decodeInto(data, obj)
+	} else {
+		obj, err = s.decode(data)
+	}
+
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	if len(gvk.Kind) == 0 {
+		return nil, &gvk, runtime.NewMissingKindErr(string(data))
+	}
+
+	return obj, &gvk, nil
+}
+
+func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error {
+	switch t := obj.(type) {
+	case *Unstructured:
+		return json.NewEncoder(w).Encode(t.Object)
+	case *UnstructuredList:
+		items := make([]interface{}, 0, len(t.Items))
+		for _, i := range t.Items {
+			items = append(items, i.Object)
+		}
+		listObj := make(map[string]interface{}, len(t.Object)+1)
+		for k, v := range t.Object { // Make a shallow copy
+			listObj[k] = v
+		}
+		listObj["items"] = items
+		return json.NewEncoder(w).Encode(listObj)
+	case *runtime.Unknown:
+		// TODO: Unstructured needs to deal with ContentType.
+		_, err := w.Write(t.Raw)
+		return err
+	default:
+		return json.NewEncoder(w).Encode(t)
+	}
+}
+
+// Identifier implements runtime.Encoder interface.
+func (unstructuredJSONScheme) Identifier() runtime.Identifier {
+	return unstructuredJSONSchemeIdentifier
+}
+
+func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) {
+	type detector struct {
+		Items gojson.RawMessage
+	}
+	var det detector
+	if err := json.Unmarshal(data, &det); err != nil {
+		return nil, err
+	}
+
+	if det.Items != nil {
+		list := &UnstructuredList{}
+		err := s.decodeToList(data, list)
+		return list, err
+	}
+
+	// No Items field, so it wasn't a list.
+	unstruct := &Unstructured{}
+	err := s.decodeToUnstructured(data, unstruct)
+	return unstruct, err
+}
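
Editorial note: a usage sketch, not part of the vendored file. Decode returns an *Unstructured for a single object and an *UnstructuredList when the payload carries an "items" array; JSON without a "kind" produces a missing-kind error. The JSON literal is hypothetical.

    // assumed imports: "fmt", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    func exampleDecode() {
    	data := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"example"}}`)
    	obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, nil, nil)
    	if err == nil {
    		fmt.Printf("%T %s\n", obj, gvk.Kind) // *unstructured.Unstructured ConfigMap
    	}
    }
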
+
+func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error {
+	switch x := obj.(type) {
+	case *Unstructured:
+		return s.decodeToUnstructured(data, x)
+	case *UnstructuredList:
+		return s.decodeToList(data, x)
+	default:
+		return json.Unmarshal(data, x)
+	}
+}
+
+func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error {
+	m := make(map[string]interface{})
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	unstruct.Object = m
+
+	return nil
+}
+
+func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error {
+	type decodeList struct {
+		Items []gojson.RawMessage
+	}
+
+	var dList decodeList
+	if err := json.Unmarshal(data, &dList); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(data, &list.Object); err != nil {
+		return err
+	}
+
+	// For typed lists, e.g., a PodList, API server doesn't set each item's
+	// APIVersion and Kind. We need to set it.
+	listAPIVersion := list.GetAPIVersion()
+	listKind := list.GetKind()
+	itemKind := strings.TrimSuffix(listKind, "List")
+
+	delete(list.Object, "items")
+	list.Items = make([]Unstructured, 0, len(dList.Items))
+	for _, i := range dList.Items {
+		unstruct := &Unstructured{}
+		if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil {
+			return err
+		}
+		// This is hacky. Set the item's Kind and APIVersion to those inferred
+		// from the List.
+		if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 {
+			unstruct.SetKind(itemKind)
+			unstruct.SetAPIVersion(listAPIVersion)
+		}
+		list.Items = append(list.Items, *unstruct)
+	}
+	return nil
+}
+
+type jsonFallbackEncoder struct {
+	encoder    runtime.Encoder
+	identifier runtime.Identifier
+}
+
+func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder {
+	result := map[string]string{
+		"name": "fallback",
+		"base": string(encoder.Identifier()),
+	}
+	identifier, err := gojson.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err)
+	}
+	return &jsonFallbackEncoder{
+		encoder:    encoder,
+		identifier: runtime.Identifier(identifier),
+	}
+}
+
+func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error {
+	// There is no need to handle runtime.CacheableObject, as we only
+	// fallback to other encoders here.
+	err := c.encoder.Encode(obj, w)
+	if runtime.IsNotRegisteredError(err) {
+		switch obj.(type) {
+		case *Unstructured, *UnstructuredList:
+			return UnstructuredJSONScheme.Encode(obj, w)
+		}
+	}
+	return err
+}
+
+// Identifier implements runtime.Encoder interface.
+func (c *jsonFallbackEncoder) Identifier() runtime.Identifier {
+	return c.identifier
+}
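
Editorial note: a sketch, not part of the vendored file, of wrapping an encoder so that unregistered Unstructured objects still serialize as plain JSON. UnstructuredJSONScheme is used as the wrapped encoder purely for brevity; in practice this would be a scheme-backed codec.

    // assumed imports: "bytes", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    func exampleFallbackEncode() {
    	enc := unstructured.NewJSONFallbackEncoder(unstructured.UnstructuredJSONScheme)
    	u := &unstructured.Unstructured{Object: map[string]interface{}{
    		"apiVersion": "v1", "kind": "ConfigMap",
    	}}
    	var buf bytes.Buffer
    	_ = enc.Encode(u, &buf) // the JSON fallback only kicks in on not-registered errors
    }
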
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
new file mode 100644
index 0000000..d190339
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -0,0 +1,496 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Unstructured allows objects that do not have Golang structs registered to be manipulated
+// generically. This can be used to deal with the API objects from a plug-in. Unstructured
+// objects still have functioning TypeMeta features-- kind, version, etc.
+//
+// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this
+// type if you are dealing with objects that are not in the server meta v1 schema.
+//
+// TODO: make the serialization part of this type distinct from the field accessors.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type Unstructured struct {
+	// Object is a JSON compatible map with string, float, int, bool, []interface{}, or
+	// map[string]interface{}
+	// children.
+	Object map[string]interface{}
+}
+
+var _ metav1.Object = &Unstructured{}
+var _ runtime.Unstructured = &Unstructured{}
+var _ metav1.ListInterface = &Unstructured{}
+
+func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj }
+
+func (obj *Unstructured) IsList() bool {
+	field, ok := obj.Object["items"]
+	if !ok {
+		return false
+	}
+	_, ok = field.([]interface{})
+	return ok
+}
+func (obj *Unstructured) ToList() (*UnstructuredList, error) {
+	if !obj.IsList() {
+		// return an empty list back
+		return &UnstructuredList{Object: obj.Object}, nil
+	}
+
+	ret := &UnstructuredList{}
+	ret.Object = obj.Object
+
+	err := obj.EachListItem(func(item runtime.Object) error {
+		castItem := item.(*Unstructured)
+		ret.Items = append(ret.Items, *castItem)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error {
+	field, ok := obj.Object["items"]
+	if !ok {
+		return errors.New("content is not a list")
+	}
+	items, ok := field.([]interface{})
+	if !ok {
+		return fmt.Errorf("content is not a list: %T", field)
+	}
+	for _, item := range items {
+		child, ok := item.(map[string]interface{})
+		if !ok {
+			return fmt.Errorf("items member is not an object: %T", item)
+		}
+		if err := fn(&Unstructured{Object: child}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
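
Editorial note: an iteration sketch, not part of the vendored file; the list literal below is hypothetical and minimal.

    // assumed imports: "fmt", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
    // "k8s.io/apimachinery/pkg/runtime"
    func exampleEachListItem() {
    	u := &unstructured.Unstructured{Object: map[string]interface{}{
    		"apiVersion": "v1",
    		"kind":       "ConfigMapList",
    		"items": []interface{}{
    			map[string]interface{}{"metadata": map[string]interface{}{"name": "a"}},
    			map[string]interface{}{"metadata": map[string]interface{}{"name": "b"}},
    		},
    	}}
    	if u.IsList() {
    		_ = u.EachListItem(func(item runtime.Object) error {
    			fmt.Println(item.(*unstructured.Unstructured).GetName()) // a, then b
    			return nil
    		})
    	}
    }
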
+
+func (obj *Unstructured) UnstructuredContent() map[string]interface{} {
+	if obj.Object == nil {
+		return make(map[string]interface{})
+	}
+	return obj.Object
+}
+
+func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) {
+	obj.Object = content
+}
+
+// MarshalJSON ensures that the unstructured object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *Unstructured) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	err := UnstructuredJSONScheme.Encode(u, &buf)
+	return buf.Bytes(), err
+}
+
+// UnmarshalJSON ensures that the unstructured object properly decodes
+// JSON when passed to Go's standard JSON library.
+func (u *Unstructured) UnmarshalJSON(b []byte) error {
+	_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
+	return err
+}
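
Editorial note: a round-trip sketch, not part of the vendored file. Because these methods delegate to UnstructuredJSONScheme, encoding/json handles Unstructured directly; the payload is hypothetical.

    // assumed imports: "encoding/json", "fmt", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    func exampleJSONRoundTrip() {
    	u := &unstructured.Unstructured{}
    	_ = u.UnmarshalJSON([]byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"example"}}`))
    	out, _ := json.Marshal(u) // re-encodes u.Object
    	fmt.Println(string(out))
    }
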
+
+// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
+func (in *Unstructured) NewEmptyInstance() runtime.Unstructured {
+	out := new(Unstructured)
+	if in != nil {
+		out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind())
+	}
+	return out
+}
+
+func (in *Unstructured) DeepCopy() *Unstructured {
+	if in == nil {
+		return nil
+	}
+	out := new(Unstructured)
+	*out = *in
+	out.Object = runtime.DeepCopyJSON(in.Object)
+	return out
+}
+
+func (u *Unstructured) setNestedField(value interface{}, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedField(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedStringSlice(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedSlice(u.Object, value, fields...)
+}
+
+func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedStringMap(u.Object, value, fields...)
+}
+
+func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference {
+	field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences")
+	if !found || err != nil {
+		return nil
+	}
+	original, ok := field.([]interface{})
+	if !ok {
+		return nil
+	}
+	ret := make([]metav1.OwnerReference, 0, len(original))
+	for _, obj := range original {
+		o, ok := obj.(map[string]interface{})
+		if !ok {
+			// expected map[string]interface{}, got something else
+			return nil
+		}
+		ret = append(ret, extractOwnerReference(o))
+	}
+	return ret
+}
+
+func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) {
+	if references == nil {
+		RemoveNestedField(u.Object, "metadata", "ownerReferences")
+		return
+	}
+
+	newReferences := make([]interface{}, 0, len(references))
+	for _, reference := range references {
+		out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err))
+			continue
+		}
+		newReferences = append(newReferences, out)
+	}
+	u.setNestedField(newReferences, "metadata", "ownerReferences")
+}
+
+func (u *Unstructured) GetAPIVersion() string {
+	return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *Unstructured) SetAPIVersion(version string) {
+	u.setNestedField(version, "apiVersion")
+}
+
+func (u *Unstructured) GetKind() string {
+	return getNestedString(u.Object, "kind")
+}
+
+func (u *Unstructured) SetKind(kind string) {
+	u.setNestedField(kind, "kind")
+}
+
+func (u *Unstructured) GetNamespace() string {
+	return getNestedString(u.Object, "metadata", "namespace")
+}
+
+func (u *Unstructured) SetNamespace(namespace string) {
+	if len(namespace) == 0 {
+		RemoveNestedField(u.Object, "metadata", "namespace")
+		return
+	}
+	u.setNestedField(namespace, "metadata", "namespace")
+}
+
+func (u *Unstructured) GetName() string {
+	return getNestedString(u.Object, "metadata", "name")
+}
+
+func (u *Unstructured) SetName(name string) {
+	if len(name) == 0 {
+		RemoveNestedField(u.Object, "metadata", "name")
+		return
+	}
+	u.setNestedField(name, "metadata", "name")
+}
+
+func (u *Unstructured) GetGenerateName() string {
+	return getNestedString(u.Object, "metadata", "generateName")
+}
+
+func (u *Unstructured) SetGenerateName(generateName string) {
+	if len(generateName) == 0 {
+		RemoveNestedField(u.Object, "metadata", "generateName")
+		return
+	}
+	u.setNestedField(generateName, "metadata", "generateName")
+}
+
+func (u *Unstructured) GetUID() types.UID {
+	return types.UID(getNestedString(u.Object, "metadata", "uid"))
+}
+
+func (u *Unstructured) SetUID(uid types.UID) {
+	if len(string(uid)) == 0 {
+		RemoveNestedField(u.Object, "metadata", "uid")
+		return
+	}
+	u.setNestedField(string(uid), "metadata", "uid")
+}
+
+func (u *Unstructured) GetResourceVersion() string {
+	return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) SetResourceVersion(resourceVersion string) {
+	if len(resourceVersion) == 0 {
+		RemoveNestedField(u.Object, "metadata", "resourceVersion")
+		return
+	}
+	u.setNestedField(resourceVersion, "metadata", "resourceVersion")
+}
+
+func (u *Unstructured) GetGeneration() int64 {
+	val, found, err := NestedInt64(u.Object, "metadata", "generation")
+	if !found || err != nil {
+		return 0
+	}
+	return val
+}
+
+func (u *Unstructured) SetGeneration(generation int64) {
+	if generation == 0 {
+		RemoveNestedField(u.Object, "metadata", "generation")
+		return
+	}
+	u.setNestedField(generation, "metadata", "generation")
+}
+
+func (u *Unstructured) GetSelfLink() string {
+	return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *Unstructured) SetSelfLink(selfLink string) {
+	if len(selfLink) == 0 {
+		RemoveNestedField(u.Object, "metadata", "selfLink")
+		return
+	}
+	u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *Unstructured) GetContinue() string {
+	return getNestedString(u.Object, "metadata", "continue")
+}
+
+func (u *Unstructured) SetContinue(c string) {
+	if len(c) == 0 {
+		RemoveNestedField(u.Object, "metadata", "continue")
+		return
+	}
+	u.setNestedField(c, "metadata", "continue")
+}
+
+func (u *Unstructured) GetRemainingItemCount() *int64 {
+	return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
+}
+
+func (u *Unstructured) SetRemainingItemCount(c *int64) {
+	if c == nil {
+		RemoveNestedField(u.Object, "metadata", "remainingItemCount")
+	} else {
+		u.setNestedField(*c, "metadata", "remainingItemCount")
+	}
+}
+
+func (u *Unstructured) GetCreationTimestamp() metav1.Time {
+	var timestamp metav1.Time
+	timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
+	return timestamp
+}
+
+func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) {
+	ts, _ := timestamp.MarshalQueryParameter()
+	if len(ts) == 0 || timestamp.Time.IsZero() {
+		RemoveNestedField(u.Object, "metadata", "creationTimestamp")
+		return
+	}
+	u.setNestedField(ts, "metadata", "creationTimestamp")
+}
+
+func (u *Unstructured) GetDeletionTimestamp() *metav1.Time {
+	var timestamp metav1.Time
+	timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp"))
+	if timestamp.IsZero() {
+		return nil
+	}
+	return &timestamp
+}
+
+func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) {
+	if timestamp == nil {
+		RemoveNestedField(u.Object, "metadata", "deletionTimestamp")
+		return
+	}
+	ts, _ := timestamp.MarshalQueryParameter()
+	u.setNestedField(ts, "metadata", "deletionTimestamp")
+}
+
+func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 {
+	val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds")
+	if !found || err != nil {
+		return nil
+	}
+	return &val
+}
+
+func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) {
+	if deletionGracePeriodSeconds == nil {
+		RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds")
+		return
+	}
+	u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds")
+}
+
+func (u *Unstructured) GetLabels() map[string]string {
+	m, _, _ := NestedStringMap(u.Object, "metadata", "labels")
+	return m
+}
+
+func (u *Unstructured) SetLabels(labels map[string]string) {
+	if labels == nil {
+		RemoveNestedField(u.Object, "metadata", "labels")
+		return
+	}
+	u.setNestedMap(labels, "metadata", "labels")
+}
+
+func (u *Unstructured) GetAnnotations() map[string]string {
+	m, _, _ := NestedStringMap(u.Object, "metadata", "annotations")
+	return m
+}
+
+func (u *Unstructured) SetAnnotations(annotations map[string]string) {
+	if annotations == nil {
+		RemoveNestedField(u.Object, "metadata", "annotations")
+		return
+	}
+	u.setNestedMap(annotations, "metadata", "annotations")
+}
+
+func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	u.SetAPIVersion(gvk.GroupVersion().String())
+	u.SetKind(gvk.Kind)
+}
+
+func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind {
+	gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
+	if err != nil {
+		return schema.GroupVersionKind{}
+	}
+	gvk := gv.WithKind(u.GetKind())
+	return gvk
+}
+
+func (u *Unstructured) GetFinalizers() []string {
+	val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers")
+	return val
+}
+
+func (u *Unstructured) SetFinalizers(finalizers []string) {
+	if finalizers == nil {
+		RemoveNestedField(u.Object, "metadata", "finalizers")
+		return
+	}
+	u.setNestedStringSlice(finalizers, "metadata", "finalizers")
+}
+
+func (u *Unstructured) GetClusterName() string {
+	return getNestedString(u.Object, "metadata", "clusterName")
+}
+
+func (u *Unstructured) SetClusterName(clusterName string) {
+	if len(clusterName) == 0 {
+		RemoveNestedField(u.Object, "metadata", "clusterName")
+		return
+	}
+	u.setNestedField(clusterName, "metadata", "clusterName")
+}
+
+func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
+	items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
+	if !found || err != nil {
+		return nil
+	}
+	managedFields := []metav1.ManagedFieldsEntry{}
+	for _, item := range items {
+		m, ok := item.(map[string]interface{})
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item))
+			return nil
+		}
+		out := metav1.ManagedFieldsEntry{}
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil {
+			utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err))
+			return nil
+		}
+		managedFields = append(managedFields, out)
+	}
+	return managedFields
+}
+
+func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) {
+	if managedFields == nil {
+		RemoveNestedField(u.Object, "metadata", "managedFields")
+		return
+	}
+	items := []interface{}{}
+	for _, managedFieldsEntry := range managedFields {
+		out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err))
+			return
+		}
+		items = append(items, out)
+	}
+	u.setNestedSlice(items, "metadata", "managedFields")
+}
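
All of the accessors above funnel through the nested-field helpers, so callers never need to touch u.Object directly for common metadata. A minimal, illustrative sketch of that usage follows; the group, names and labels are made up for this example and are not taken from this change:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build an object without a registered Go struct.
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "bbsim.opencord.org", Version: "v1", Kind: "Example"})
	u.SetNamespace("voltha")
	u.SetName("bbsim-sadis-server")
	u.SetLabels(map[string]string{"app": "bbsim"})

	// MarshalJSON delegates to UnstructuredJSONScheme, so apiVersion/kind are emitted.
	raw, err := json.Marshal(u)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))

	// Round-trip back through UnmarshalJSON.
	out := &unstructured.Unstructured{}
	if err := json.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetNamespace(), out.GetName(), out.GetLabels()["app"])
}
```

The round trip works because MarshalJSON and UnmarshalJSON go through UnstructuredJSONScheme rather than encoding the struct fields directly.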
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
new file mode 100644
index 0000000..5028f5f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
@@ -0,0 +1,210 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructured
+
+import (
+	"bytes"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var _ runtime.Unstructured = &UnstructuredList{}
+var _ metav1.ListInterface = &UnstructuredList{}
+
+// UnstructuredList allows lists that do not have Golang structs
+// registered to be manipulated generically. This can be used to deal
+// with the API lists from a plug-in.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:deepcopy-gen=true
+type UnstructuredList struct {
+	Object map[string]interface{}
+
+	// Items is a list of unstructured objects.
+	Items []Unstructured `json:"items"`
+}
+
+func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u }
+
+func (u *UnstructuredList) IsList() bool { return true }
+
+func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error {
+	for i := range u.Items {
+		if err := fn(&u.Items[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+// This should be called instead of reflect.New() for unstructured types because the Go type alone does not preserve kind/apiVersion info.
+func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured {
+	out := new(UnstructuredList)
+	if u != nil {
+		out.SetGroupVersionKind(u.GroupVersionKind())
+	}
+	return out
+}
+
+// UnstructuredContent returns a map containing an overlay of the Items field onto
+// the Object field. Items always overwrites the overlay.
+func (u *UnstructuredList) UnstructuredContent() map[string]interface{} {
+	out := make(map[string]interface{}, len(u.Object)+1)
+
+	// shallow copy every property
+	for k, v := range u.Object {
+		out[k] = v
+	}
+
+	items := make([]interface{}, len(u.Items))
+	for i, item := range u.Items {
+		items[i] = item.UnstructuredContent()
+	}
+	out["items"] = items
+	return out
+}
+
+// SetUnstructuredContent obeys the conventions of List and keeps Items and the items
+// array in sync. If items is not an array of objects in the incoming map, then any
+// mismatched item will be removed.
+func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) {
+	obj.Object = content
+	if content == nil {
+		obj.Items = nil
+		return
+	}
+	items, ok := obj.Object["items"].([]interface{})
+	if !ok || items == nil {
+		items = []interface{}{}
+	}
+	unstructuredItems := make([]Unstructured, 0, len(items))
+	newItems := make([]interface{}, 0, len(items))
+	for _, item := range items {
+		o, ok := item.(map[string]interface{})
+		if !ok {
+			continue
+		}
+		unstructuredItems = append(unstructuredItems, Unstructured{Object: o})
+		newItems = append(newItems, o)
+	}
+	obj.Items = unstructuredItems
+	obj.Object["items"] = newItems
+}
+
+func (u *UnstructuredList) DeepCopy() *UnstructuredList {
+	if u == nil {
+		return nil
+	}
+	out := new(UnstructuredList)
+	*out = *u
+	out.Object = runtime.DeepCopyJSON(u.Object)
+	out.Items = make([]Unstructured, len(u.Items))
+	for i := range u.Items {
+		u.Items[i].DeepCopyInto(&out.Items[i])
+	}
+	return out
+}
+
+// MarshalJSON ensures that the unstructured list object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *UnstructuredList) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	err := UnstructuredJSONScheme.Encode(u, &buf)
+	return buf.Bytes(), err
+}
+
+// UnmarshalJSON ensures that the unstructured list object properly
+// decodes JSON when passed to Go's standard JSON library.
+func (u *UnstructuredList) UnmarshalJSON(b []byte) error {
+	_, _, err := UnstructuredJSONScheme.Decode(b, nil, u)
+	return err
+}
+
+func (u *UnstructuredList) GetAPIVersion() string {
+	return getNestedString(u.Object, "apiVersion")
+}
+
+func (u *UnstructuredList) SetAPIVersion(version string) {
+	u.setNestedField(version, "apiVersion")
+}
+
+func (u *UnstructuredList) GetKind() string {
+	return getNestedString(u.Object, "kind")
+}
+
+func (u *UnstructuredList) SetKind(kind string) {
+	u.setNestedField(kind, "kind")
+}
+
+func (u *UnstructuredList) GetResourceVersion() string {
+	return getNestedString(u.Object, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) SetResourceVersion(version string) {
+	u.setNestedField(version, "metadata", "resourceVersion")
+}
+
+func (u *UnstructuredList) GetSelfLink() string {
+	return getNestedString(u.Object, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) SetSelfLink(selfLink string) {
+	u.setNestedField(selfLink, "metadata", "selfLink")
+}
+
+func (u *UnstructuredList) GetContinue() string {
+	return getNestedString(u.Object, "metadata", "continue")
+}
+
+func (u *UnstructuredList) SetContinue(c string) {
+	u.setNestedField(c, "metadata", "continue")
+}
+
+func (u *UnstructuredList) GetRemainingItemCount() *int64 {
+	return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount")
+}
+
+func (u *UnstructuredList) SetRemainingItemCount(c *int64) {
+	if c == nil {
+		RemoveNestedField(u.Object, "metadata", "remainingItemCount")
+	} else {
+		u.setNestedField(*c, "metadata", "remainingItemCount")
+	}
+}
+
+func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	u.SetAPIVersion(gvk.GroupVersion().String())
+	u.SetKind(gvk.Kind)
+}
+
+func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind {
+	gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
+	if err != nil {
+		return schema.GroupVersionKind{}
+	}
+	gvk := gv.WithKind(u.GetKind())
+	return gvk
+}
+
+func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) {
+	if u.Object == nil {
+		u.Object = make(map[string]interface{})
+	}
+	SetNestedField(u.Object, value, fields...)
+}
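
The list type keeps Items and the "items" key in Object synchronized through SetUnstructuredContent, and EachListItem walks Items as runtime.Objects. A small sketch of that behavior, with made-up item names:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	list := &unstructured.UnstructuredList{}

	// SetUnstructuredContent keeps Items and the "items" key in sync and
	// silently drops entries that are not objects (the bare string below).
	list.SetUnstructuredContent(map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "List",
		"items": []interface{}{
			map[string]interface{}{"metadata": map[string]interface{}{"name": "onu-1"}},
			"not-an-object",
			map[string]interface{}{"metadata": map[string]interface{}{"name": "onu-2"}},
		},
	})

	names := []string{}
	_ = list.EachListItem(func(obj runtime.Object) error {
		names = append(names, obj.(*unstructured.Unstructured).GetName())
		return nil
	})
	fmt.Println(len(list.Items), names) // 2 [onu-1 onu-2]
}
```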
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
new file mode 100644
index 0000000..9a9f25e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
@@ -0,0 +1,55 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package unstructured
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Unstructured) DeepCopyInto(out *Unstructured) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Unstructured) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UnstructuredList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
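
Because the generated DeepCopyInto delegates to DeepCopy, which in turn uses runtime.DeepCopyJSON, copies are independent of the original down to nested maps. A quick sketch of that property (label values are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	orig := &unstructured.Unstructured{Object: map[string]interface{}{
		"metadata": map[string]interface{}{"labels": map[string]interface{}{"app": "bbsim"}},
	}}

	// DeepCopyObject goes through runtime.DeepCopyJSON, so mutating the copy
	// leaves the original's nested maps untouched.
	cp := orig.DeepCopyObject().(*unstructured.Unstructured)
	cp.SetLabels(map[string]string{"app": "changed"})

	fmt.Println(orig.GetLabels()["app"], cp.GetLabels()["app"]) // bbsim changed
}
```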
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
new file mode 100644
index 0000000..58f0773
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Event represents a single event to a watched resource.
+//
+// +protobuf=true
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type WatchEvent struct {
+	Type string `json:"type" protobuf:"bytes,1,opt,name=type"`
+
+	// Object is:
+	//  * If Type is Added or Modified: the new state of the object.
+	//  * If Type is Deleted: the state of the object immediately before deletion.
+	//  * If Type is Error: *Status is recommended; other types may make sense
+	//    depending on context.
+	Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"`
+}
+
+func Convert_watch_Event_To_v1_WatchEvent(in *watch.Event, out *WatchEvent, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	switch t := in.Object.(type) {
+	case *runtime.Unknown:
+		// TODO: handle other fields on Unknown and detect type
+		out.Object.Raw = t.Raw
+	case nil:
+	default:
+		out.Object.Object = in.Object
+	}
+	return nil
+}
+
+func Convert_v1_InternalEvent_To_v1_WatchEvent(in *InternalEvent, out *WatchEvent, s conversion.Scope) error {
+	return Convert_watch_Event_To_v1_WatchEvent((*watch.Event)(in), out, s)
+}
+
+func Convert_v1_WatchEvent_To_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error {
+	out.Type = watch.EventType(in.Type)
+	if in.Object.Object != nil {
+		out.Object = in.Object.Object
+	} else if in.Object.Raw != nil {
+		// TODO: handle other fields on Unknown and detect type
+		out.Object = &runtime.Unknown{
+			Raw:         in.Object.Raw,
+			ContentType: runtime.ContentTypeJSON,
+		}
+	}
+	return nil
+}
+
+func Convert_v1_WatchEvent_To_v1_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error {
+	return Convert_v1_WatchEvent_To_watch_Event(in, (*watch.Event)(out), s)
+}
+
+// InternalEvent makes watch.Event versioned
+// +protobuf=false
+type InternalEvent watch.Event
+
+func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind }
+func (e *WatchEvent) GetObjectKind() schema.ObjectKind    { return schema.EmptyObjectKind }
+func (e *InternalEvent) DeepCopyObject() runtime.Object {
+	if c := e.DeepCopy(); c != nil {
+		return c
+	} else {
+		return nil
+	}
+}
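
These helpers wrap an internal watch.Event into the versioned WatchEvent wire form and back. A sketch of a round trip; the object name is illustrative, and the nil scope is enough here only because this particular conversion does not consult it:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	obj := &unstructured.Unstructured{}
	obj.SetName("bbsim-0")

	in := watch.Event{Type: watch.Added, Object: obj}

	// Wrap the internal event into the versioned wire type.
	var wire metav1.WatchEvent
	if err := metav1.Convert_watch_Event_To_v1_WatchEvent(&in, &wire, nil); err != nil {
		panic(err)
	}

	// And back again; Object.Object is carried through unchanged.
	var back watch.Event
	if err := metav1.Convert_v1_WatchEvent_To_watch_Event(&wire, &back, nil); err != nil {
		panic(err)
	}
	fmt.Println(wire.Type, back.Object.(*unstructured.Unstructured).GetName()) // ADDED bbsim-0
}
```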
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
new file mode 100644
index 0000000..06afd9b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
@@ -0,0 +1,535 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	url "net/url"
+	unsafe "unsafe"
+
+	resource "k8s.io/apimachinery/pkg/api/resource"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	fields "k8s.io/apimachinery/pkg/fields"
+	labels "k8s.io/apimachinery/pkg/labels"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+	watch "k8s.io/apimachinery/pkg/watch"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), b.(*TableOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), b.(*LabelSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*ResourceVersionMatch)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_v1_ResourceVersionMatch(a.(*[]string), b.(*ResourceVersionMatch), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
+		out.DryRun = *(*[]string)(unsafe.Pointer(&values))
+	} else {
+		out.DryRun = nil
+	}
+	if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil {
+			return err
+		}
+	} else {
+		out.FieldManager = ""
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_CreateOptions(in, out, s)
+}
+
+func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil {
+			return err
+		}
+	} else {
+		out.GracePeriodSeconds = nil
+	}
+	// INFO: in.Preconditions opted out of conversion generation
+	if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil {
+			return err
+		}
+	} else {
+		out.OrphanDependents = nil
+	}
+	if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 {
+		if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil {
+			return err
+		}
+	} else {
+		out.PropagationPolicy = nil
+	}
+	if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
+		out.DryRun = *(*[]string)(unsafe.Pointer(&values))
+	} else {
+		out.DryRun = nil
+	}
+	return nil
+}
+
+func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil {
+			return err
+		}
+	} else {
+		out.Export = false
+	}
+	if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil {
+			return err
+		}
+	} else {
+		out.Exact = false
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_ExportOptions(in, out, s)
+}
+
+func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil {
+			return err
+		}
+	} else {
+		out.ResourceVersion = ""
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_GetOptions(in, out, s)
+}
+
+func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil {
+			return err
+		}
+	} else {
+		out.LabelSelector = ""
+	}
+	if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil {
+			return err
+		}
+	} else {
+		out.FieldSelector = ""
+	}
+	if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil {
+			return err
+		}
+	} else {
+		out.Watch = false
+	}
+	if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil {
+			return err
+		}
+	} else {
+		out.AllowWatchBookmarks = false
+	}
+	if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil {
+			return err
+		}
+	} else {
+		out.ResourceVersion = ""
+	}
+	if values, ok := map[string][]string(*in)["resourceVersionMatch"]; ok && len(values) > 0 {
+		if err := Convert_Slice_string_To_v1_ResourceVersionMatch(&values, &out.ResourceVersionMatch, s); err != nil {
+			return err
+		}
+	} else {
+		out.ResourceVersionMatch = ""
+	}
+	if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil {
+			return err
+		}
+	} else {
+		out.TimeoutSeconds = nil
+	}
+	if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil {
+			return err
+		}
+	} else {
+		out.Limit = 0
+	}
+	if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil {
+			return err
+		}
+	} else {
+		out.Continue = ""
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_ListOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_ListOptions(in, out, s)
+}
+
+func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
+		out.DryRun = *(*[]string)(unsafe.Pointer(&values))
+	} else {
+		out.DryRun = nil
+	}
+	if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil {
+			return err
+		}
+	} else {
+		out.Force = nil
+	}
+	if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil {
+			return err
+		}
+	} else {
+		out.FieldManager = ""
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_PatchOptions(in, out, s)
+}
+
+func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil {
+			return err
+		}
+	} else {
+		out.NoHeaders = false
+	}
+	if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 {
+		if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil {
+			return err
+		}
+	} else {
+		out.IncludeObject = ""
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_TableOptions(in, out, s)
+}
+
+func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error {
+	// WARNING: Field TypeMeta does not have json tag, skipping.
+
+	if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 {
+		out.DryRun = *(*[]string)(unsafe.Pointer(&values))
+	} else {
+		out.DryRun = nil
+	}
+	if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil {
+			return err
+		}
+	} else {
+		out.FieldManager = ""
+	}
+	return nil
+}
+
+// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function.
+func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error {
+	return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s)
+}
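
The url.Values converters above are what turn request query parameters into typed option structs. A sketch of the ListOptions case; normally this runs through a runtime.Scheme and ParameterCodec, and calling the generated function directly with a nil scope is only for illustration, since the leaf converters used here do not consult the scope:

```go
package main

import (
	"fmt"
	"net/url"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Query parameters as they might arrive on a list request.
	values := url.Values{
		"labelSelector":  []string{"app=bbsim"},
		"limit":          []string{"50"},
		"watch":          []string{"true"},
		"timeoutSeconds": []string{"30"},
	}

	// Convert into the typed options struct.
	var opts metav1.ListOptions
	if err := metav1.Convert_url_Values_To_v1_ListOptions(&values, &opts, nil); err != nil {
		panic(err)
	}
	fmt.Println(opts.LabelSelector, opts.Limit, opts.Watch, *opts.TimeoutSeconds) // app=bbsim 50 true 30
}
```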
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..1aa73bd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1190 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIGroup) DeepCopyInto(out *APIGroup) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]GroupVersionForDiscovery, len(*in))
+		copy(*out, *in)
+	}
+	out.PreferredVersion = in.PreferredVersion
+	if in.ServerAddressByClientCIDRs != nil {
+		in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
+		*out = make([]ServerAddressByClientCIDR, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroup.
+func (in *APIGroup) DeepCopy() *APIGroup {
+	if in == nil {
+		return nil
+	}
+	out := new(APIGroup)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIGroup) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIGroupList) DeepCopyInto(out *APIGroupList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]APIGroup, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupList.
+func (in *APIGroupList) DeepCopy() *APIGroupList {
+	if in == nil {
+		return nil
+	}
+	out := new(APIGroupList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIGroupList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIResource) DeepCopyInto(out *APIResource) {
+	*out = *in
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make(Verbs, len(*in))
+		copy(*out, *in)
+	}
+	if in.ShortNames != nil {
+		in, out := &in.ShortNames, &out.ShortNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Categories != nil {
+		in, out := &in.Categories, &out.Categories
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource.
+func (in *APIResource) DeepCopy() *APIResource {
+	if in == nil {
+		return nil
+	}
+	out := new(APIResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIResourceList) DeepCopyInto(out *APIResourceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.APIResources != nil {
+		in, out := &in.APIResources, &out.APIResources
+		*out = make([]APIResource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceList.
+func (in *APIResourceList) DeepCopy() *APIResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(APIResourceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIResourceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIVersions) DeepCopyInto(out *APIVersions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Versions != nil {
+		in, out := &in.Versions, &out.Versions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ServerAddressByClientCIDRs != nil {
+		in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
+		*out = make([]ServerAddressByClientCIDR, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions.
+func (in *APIVersions) DeepCopy() *APIVersions {
+	if in == nil {
+		return nil
+	}
+	out := new(APIVersions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *APIVersions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Condition) DeepCopyInto(out *Condition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
+func (in *Condition) DeepCopy() *Condition {
+	if in == nil {
+		return nil
+	}
+	out := new(Condition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CreateOptions) DeepCopyInto(out *CreateOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateOptions.
+func (in *CreateOptions) DeepCopy() *CreateOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(CreateOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CreateOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.GracePeriodSeconds != nil {
+		in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Preconditions != nil {
+		in, out := &in.Preconditions, &out.Preconditions
+		*out = new(Preconditions)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OrphanDependents != nil {
+		in, out := &in.OrphanDependents, &out.OrphanDependents
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PropagationPolicy != nil {
+		in, out := &in.PropagationPolicy, &out.PropagationPolicy
+		*out = new(DeletionPropagation)
+		**out = **in
+	}
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions.
+func (in *DeleteOptions) DeepCopy() *DeleteOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(DeleteOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *DeleteOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Duration) DeepCopyInto(out *Duration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Duration.
+func (in *Duration) DeepCopy() *Duration {
+	if in == nil {
+		return nil
+	}
+	out := new(Duration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExportOptions) DeepCopyInto(out *ExportOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportOptions.
+func (in *ExportOptions) DeepCopy() *ExportOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ExportOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExportOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FieldsV1) DeepCopyInto(out *FieldsV1) {
+	*out = *in
+	if in.Raw != nil {
+		in, out := &in.Raw, &out.Raw
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1.
+func (in *FieldsV1) DeepCopy() *FieldsV1 {
+	if in == nil {
+		return nil
+	}
+	out := new(FieldsV1)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GetOptions) DeepCopyInto(out *GetOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetOptions.
+func (in *GetOptions) DeepCopy() *GetOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(GetOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GetOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupKind) DeepCopyInto(out *GroupKind) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupKind.
+func (in *GroupKind) DeepCopy() *GroupKind {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupKind)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupResource) DeepCopyInto(out *GroupResource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource.
+func (in *GroupResource) DeepCopy() *GroupResource {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersion) DeepCopyInto(out *GroupVersion) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersion.
+func (in *GroupVersion) DeepCopy() *GroupVersion {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersion)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionForDiscovery) DeepCopyInto(out *GroupVersionForDiscovery) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionForDiscovery.
+func (in *GroupVersionForDiscovery) DeepCopy() *GroupVersionForDiscovery {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersionForDiscovery)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionKind) DeepCopyInto(out *GroupVersionKind) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKind.
+func (in *GroupVersionKind) DeepCopy() *GroupVersionKind {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersionKind)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource.
+func (in *GroupVersionResource) DeepCopy() *GroupVersionResource {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupVersionResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InternalEvent) DeepCopyInto(out *InternalEvent) {
+	*out = *in
+	if in.Object != nil {
+		out.Object = in.Object.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalEvent.
+func (in *InternalEvent) DeepCopy() *InternalEvent {
+	if in == nil {
+		return nil
+	}
+	out := new(InternalEvent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelSelector) DeepCopyInto(out *LabelSelector) {
+	*out = *in
+	if in.MatchLabels != nil {
+		in, out := &in.MatchLabels, &out.MatchLabels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]LabelSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector.
+func (in *LabelSelector) DeepCopy() *LabelSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement.
+func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(LabelSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+	if in == nil {
+		return nil
+	}
+	out := new(List)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListMeta) DeepCopyInto(out *ListMeta) {
+	*out = *in
+	if in.RemainingItemCount != nil {
+		in, out := &in.RemainingItemCount, &out.RemainingItemCount
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta.
+func (in *ListMeta) DeepCopy() *ListMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ListMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListOptions) DeepCopyInto(out *ListOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.TimeoutSeconds != nil {
+		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
+func (in *ListOptions) DeepCopy() *ListOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ListOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ListOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) {
+	*out = *in
+	if in.Time != nil {
+		in, out := &in.Time, &out.Time
+		*out = (*in).DeepCopy()
+	}
+	if in.FieldsV1 != nil {
+		in, out := &in.FieldsV1, &out.FieldsV1
+		*out = new(FieldsV1)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry.
+func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(ManagedFieldsEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime.
+func (in *MicroTime) DeepCopy() *MicroTime {
+	if in == nil {
+		return nil
+	}
+	out := new(MicroTime)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
+	*out = *in
+	in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
+	if in.DeletionTimestamp != nil {
+		in, out := &in.DeletionTimestamp, &out.DeletionTimestamp
+		*out = (*in).DeepCopy()
+	}
+	if in.DeletionGracePeriodSeconds != nil {
+		in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.OwnerReferences != nil {
+		in, out := &in.OwnerReferences, &out.OwnerReferences
+		*out = make([]OwnerReference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Finalizers != nil {
+		in, out := &in.Finalizers, &out.Finalizers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ManagedFields != nil {
+		in, out := &in.ManagedFields, &out.ManagedFields
+		*out = make([]ManagedFieldsEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
+func (in *ObjectMeta) DeepCopy() *ObjectMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OwnerReference) DeepCopyInto(out *OwnerReference) {
+	*out = *in
+	if in.Controller != nil {
+		in, out := &in.Controller, &out.Controller
+		*out = new(bool)
+		**out = **in
+	}
+	if in.BlockOwnerDeletion != nil {
+		in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference.
+func (in *OwnerReference) DeepCopy() *OwnerReference {
+	if in == nil {
+		return nil
+	}
+	out := new(OwnerReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata.
+func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata {
+	if in == nil {
+		return nil
+	}
+	out := new(PartialObjectMetadata)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PartialObjectMetadata, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
+func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
+	if in == nil {
+		return nil
+	}
+	out := new(PartialObjectMetadataList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Patch) DeepCopyInto(out *Patch) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch.
+func (in *Patch) DeepCopy() *Patch {
+	if in == nil {
+		return nil
+	}
+	out := new(Patch)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PatchOptions) DeepCopyInto(out *PatchOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Force != nil {
+		in, out := &in.Force, &out.Force
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions.
+func (in *PatchOptions) DeepCopy() *PatchOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PatchOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PatchOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	if in.ResourceVersion != nil {
+		in, out := &in.ResourceVersion, &out.ResourceVersion
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RootPaths) DeepCopyInto(out *RootPaths) {
+	*out = *in
+	if in.Paths != nil {
+		in, out := &in.Paths, &out.Paths
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootPaths.
+func (in *RootPaths) DeepCopy() *RootPaths {
+	if in == nil {
+		return nil
+	}
+	out := new(RootPaths)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR.
+func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR {
+	if in == nil {
+		return nil
+	}
+	out := new(ServerAddressByClientCIDR)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Status) DeepCopyInto(out *Status) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Details != nil {
+		in, out := &in.Details, &out.Details
+		*out = new(StatusDetails)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
+func (in *Status) DeepCopy() *Status {
+	if in == nil {
+		return nil
+	}
+	out := new(Status)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Status) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusCause) DeepCopyInto(out *StatusCause) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCause.
+func (in *StatusCause) DeepCopy() *StatusCause {
+	if in == nil {
+		return nil
+	}
+	out := new(StatusCause)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StatusDetails) DeepCopyInto(out *StatusDetails) {
+	*out = *in
+	if in.Causes != nil {
+		in, out := &in.Causes, &out.Causes
+		*out = make([]StatusCause, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDetails.
+func (in *StatusDetails) DeepCopy() *StatusDetails {
+	if in == nil {
+		return nil
+	}
+	out := new(StatusDetails)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Table) DeepCopyInto(out *Table) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.ColumnDefinitions != nil {
+		in, out := &in.ColumnDefinitions, &out.ColumnDefinitions
+		*out = make([]TableColumnDefinition, len(*in))
+		copy(*out, *in)
+	}
+	if in.Rows != nil {
+		in, out := &in.Rows, &out.Rows
+		*out = make([]TableRow, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table.
+func (in *Table) DeepCopy() *Table {
+	if in == nil {
+		return nil
+	}
+	out := new(Table)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Table) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition.
+func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition {
+	if in == nil {
+		return nil
+	}
+	out := new(TableColumnDefinition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableOptions) DeepCopyInto(out *TableOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions.
+func (in *TableOptions) DeepCopy() *TableOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(TableOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TableOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableRow) DeepCopyInto(out *TableRow) {
+	clone := in.DeepCopy()
+	*out = *clone
+	return
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition.
+func (in *TableRowCondition) DeepCopy() *TableRowCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(TableRowCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time.
+func (in *Time) DeepCopy() *Time {
+	if in == nil {
+		return nil
+	}
+	out := new(Time)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Timestamp) DeepCopyInto(out *Timestamp) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp.
+func (in *Timestamp) DeepCopy() *Timestamp {
+	if in == nil {
+		return nil
+	}
+	out := new(Timestamp)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UpdateOptions) DeepCopyInto(out *UpdateOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateOptions.
+func (in *UpdateOptions) DeepCopy() *UpdateOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(UpdateOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *UpdateOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Verbs) DeepCopyInto(out *Verbs) {
+	{
+		in := &in
+		*out = make(Verbs, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Verbs.
+func (in Verbs) DeepCopy() Verbs {
+	if in == nil {
+		return nil
+	}
+	out := new(Verbs)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WatchEvent) DeepCopyInto(out *WatchEvent) {
+	*out = *in
+	in.Object.DeepCopyInto(&out.Object)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent.
+func (in *WatchEvent) DeepCopy() *WatchEvent {
+	if in == nil {
+		return nil
+	}
+	out := new(WatchEvent)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WatchEvent) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
new file mode 100644
index 0000000..cce2e60
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
new file mode 100644
index 0000000..838d5b0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
@@ -0,0 +1,817 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type typePair struct {
+	source reflect.Type
+	dest   reflect.Type
+}
+
+type typeNamePair struct {
+	fieldType reflect.Type
+	fieldName string
+}
+
+// DebugLogger allows you to get debugging messages if necessary.
+type DebugLogger interface {
+	Logf(format string, args ...interface{})
+}
+
+type NameFunc func(t reflect.Type) string
+
+var DefaultNameFunc = func(t reflect.Type) string { return t.Name() }
+
+// ConversionFunc converts the object a into the object b, reusing arrays or objects
+// or pointers if necessary. It should return an error if the object cannot be converted
+// or if some data is invalid. If you do not wish a and b to share fields or nested
+// objects, you must copy a before calling this function.
+type ConversionFunc func(a, b interface{}, scope Scope) error
+
+// Converter knows how to convert one type to another.
+type Converter struct {
+	// Map from the conversion pair to a function which can
+	// do the conversion.
+	conversionFuncs          ConversionFuncs
+	generatedConversionFuncs ConversionFuncs
+
+	// Set of conversions that should be treated as a no-op
+	ignoredConversions        map[typePair]struct{}
+	ignoredUntypedConversions map[typePair]struct{}
+
+	// This is a map from a source field type and name, to a list of destination
+	// field type and name.
+	structFieldDests map[typeNamePair][]typeNamePair
+
+	// Allows for the opposite lookup of structFieldDests. So that SourceFromDest
+	// copy flag also works. So this is a map of destination field name, to potential
+	// source field name and type to look for.
+	structFieldSources map[typeNamePair][]typeNamePair
+
+	// Map from an input type to a function which can apply a key name mapping
+	inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc
+
+	// Map from an input type to a set of default conversion flags.
+	inputDefaultFlags map[reflect.Type]FieldMatchingFlags
+
+	// If non-nil, will be called to print helpful debugging info. Quite verbose.
+	Debug DebugLogger
+
+	// nameFunc is called to retrieve the name of a type; this name is used for the
+	// purpose of deciding whether two types match or not (i.e., will we attempt to
+	// do a conversion). The default returns the go type name.
+	nameFunc func(t reflect.Type) string
+}
+
+// NewConverter creates a new Converter object.
+func NewConverter(nameFn NameFunc) *Converter {
+	c := &Converter{
+		conversionFuncs:           NewConversionFuncs(),
+		generatedConversionFuncs:  NewConversionFuncs(),
+		ignoredConversions:        make(map[typePair]struct{}),
+		ignoredUntypedConversions: make(map[typePair]struct{}),
+		nameFunc:                  nameFn,
+		structFieldDests:          make(map[typeNamePair][]typeNamePair),
+		structFieldSources:        make(map[typeNamePair][]typeNamePair),
+
+		inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc),
+		inputDefaultFlags:      make(map[reflect.Type]FieldMatchingFlags),
+	}
+	c.RegisterUntypedConversionFunc(
+		(*[]byte)(nil), (*[]byte)(nil),
+		func(a, b interface{}, s Scope) error {
+			return Convert_Slice_byte_To_Slice_byte(a.(*[]byte), b.(*[]byte), s)
+		},
+	)
+	return c
+}
+
+// WithConversions returns a Converter that is a copy of c but with the additional
+// fns merged on top.
+func (c *Converter) WithConversions(fns ConversionFuncs) *Converter {
+	copied := *c
+	copied.conversionFuncs = c.conversionFuncs.Merge(fns)
+	return &copied
+}
+
+// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type.
+func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) {
+	return c.inputDefaultFlags[t], &Meta{
+		KeyNameMapping: c.inputFieldMappingFuncs[t],
+	}
+}
+
+// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte
+func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error {
+	if *in == nil {
+		*out = nil
+		return nil
+	}
+	*out = make([]byte, len(*in))
+	copy(*out, *in)
+	return nil
+}
+
+// Scope is passed to conversion funcs to allow them to continue an ongoing conversion.
+// If multiple converters exist in the system, Scope will allow you to use the correct one
+// from a conversion function--that is, the one your conversion function was called by.
+type Scope interface {
+	// Call Convert to convert sub-objects. Note that if you call it with your own exact
+	// parameters, you'll run out of stack space before anything useful happens.
+	Convert(src, dest interface{}, flags FieldMatchingFlags) error
+
+	// SrcTag and DestTag return the struct tags that src and dest had, respectively.
+	// If the enclosing object was not a struct, these will contain no tags.
+	SrcTag() reflect.StructTag
+	DestTag() reflect.StructTag
+
+	// Flags returns the flags with which the conversion was started.
+	Flags() FieldMatchingFlags
+
+	// Meta returns any information originally passed to Convert.
+	Meta() *Meta
+}
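+
+// A minimal sketch (not part of upstream) of a conversion function that uses
+// Scope to convert nested fields; v1Widget and internalWidget are hypothetical
+// types used only for illustration:
+//
+//	func convertV1WidgetToInternalWidget(in *v1Widget, out *internalWidget, s Scope) error {
+//		out.Name = in.Name
+//		// Delegate the nested Spec field back to the converter through the scope,
+//		// keeping whatever flags the outer conversion was started with.
+//		return s.Convert(&in.Spec, &out.Spec, s.Flags())
+//	}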
+
+// FieldMappingFunc can convert an input field value into different values, depending on
+// the value of the source or destination struct tags.
+type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string)
+
+func NewConversionFuncs() ConversionFuncs {
+	return ConversionFuncs{
+		untyped: make(map[typePair]ConversionFunc),
+	}
+}
+
+type ConversionFuncs struct {
+	untyped map[typePair]ConversionFunc
+}
+
+// AddUntyped adds the provided conversion function to the lookup table for the types that are
+// supplied as a and b. a and b must be pointers or an error is returned. This method overwrites
+// previously defined functions.
+func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error {
+	tA, tB := reflect.TypeOf(a), reflect.TypeOf(b)
+	if tA.Kind() != reflect.Ptr {
+		return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a)
+	}
+	if tB.Kind() != reflect.Ptr {
+		return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b)
+	}
+	c.untyped[typePair{tA, tB}] = fn
+	return nil
+}
+
+// Merge returns a new ConversionFuncs that contains all conversions from
+// both other and c, with other conversions taking precedence.
+func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
+	merged := NewConversionFuncs()
+	for k, v := range c.untyped {
+		merged.untyped[k] = v
+	}
+	for k, v := range other.untyped {
+		merged.untyped[k] = v
+	}
+	return merged
+}
+
+// Meta is supplied by Scheme when it calls Convert.
+type Meta struct {
+	// KeyNameMapping is an optional function which may map the listed key (field name)
+	// into a source and destination value.
+	KeyNameMapping FieldMappingFunc
+	// Context is an optional field that callers may use to pass info to conversion functions.
+	Context interface{}
+}
+
+// scope contains information about an ongoing conversion.
+type scope struct {
+	converter *Converter
+	meta      *Meta
+	flags     FieldMatchingFlags
+
+	// srcStack & destStack are separate because they may not have a 1:1
+	// relationship.
+	srcStack  scopeStack
+	destStack scopeStack
+}
+
+type scopeStackElem struct {
+	tag   reflect.StructTag
+	value reflect.Value
+	key   string
+}
+
+type scopeStack []scopeStackElem
+
+func (s *scopeStack) pop() {
+	n := len(*s)
+	*s = (*s)[:n-1]
+}
+
+func (s *scopeStack) push(e scopeStackElem) {
+	*s = append(*s, e)
+}
+
+func (s *scopeStack) top() *scopeStackElem {
+	return &(*s)[len(*s)-1]
+}
+
+func (s scopeStack) describe() string {
+	desc := ""
+	if len(s) > 1 {
+		desc = "(" + s[1].value.Type().String() + ")"
+	}
+	for i, v := range s {
+		if i < 2 {
+			// First layer on stack is not real; second is handled specially above.
+			continue
+		}
+		if v.key == "" {
+			desc += fmt.Sprintf(".%v", v.value.Type())
+		} else {
+			desc += fmt.Sprintf(".%v", v.key)
+		}
+	}
+	return desc
+}
+
+// Formats src & dest as indices for printing.
+func (s *scope) setIndices(src, dest int) {
+	s.srcStack.top().key = fmt.Sprintf("[%v]", src)
+	s.destStack.top().key = fmt.Sprintf("[%v]", dest)
+}
+
+// Formats src & dest as map keys for printing.
+func (s *scope) setKeys(src, dest interface{}) {
+	s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src)
+	s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest)
+}
+
+// Convert continues a conversion.
+func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error {
+	return s.converter.Convert(src, dest, flags, s.meta)
+}
+
+// SrcTag returns the tag of the struct containing the current source item, if any.
+func (s *scope) SrcTag() reflect.StructTag {
+	return s.srcStack.top().tag
+}
+
+// DestTag returns the tag of the struct containing the current dest item, if any.
+func (s *scope) DestTag() reflect.StructTag {
+	return s.destStack.top().tag
+}
+
+// Flags returns the flags with which the current conversion was started.
+func (s *scope) Flags() FieldMatchingFlags {
+	return s.flags
+}
+
+// Meta returns the meta object that was originally passed to Convert.
+func (s *scope) Meta() *Meta {
+	return s.meta
+}
+
+// describe prints the path to get to the current (source, dest) values.
+func (s *scope) describe() (src, dest string) {
+	return s.srcStack.describe(), s.destStack.describe()
+}
+
+// errorf makes an error that includes information about where we were in the objects
+// we were asked to convert.
+func (s *scope) errorf(message string, args ...interface{}) error {
+	srcPath, destPath := s.describe()
+	where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath)
+	return fmt.Errorf(where+message, args...)
+}
+
+// Verifies whether a conversion function has a correct signature.
+func verifyConversionFunctionSignature(ft reflect.Type) error {
+	if ft.Kind() != reflect.Func {
+		return fmt.Errorf("expected func, got: %v", ft)
+	}
+	if ft.NumIn() != 3 {
+		return fmt.Errorf("expected three 'in' params, got: %v", ft)
+	}
+	if ft.NumOut() != 1 {
+		return fmt.Errorf("expected one 'out' param, got: %v", ft)
+	}
+	if ft.In(0).Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft)
+	}
+	if ft.In(1).Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft)
+	}
+	scopeType := Scope(nil)
+	if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a {
+		return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft)
+	}
+	var forErrorType error
+	// This convolution is necessary, otherwise TypeOf picks up on the fact
+	// that forErrorType is nil.
+	errorType := reflect.TypeOf(&forErrorType).Elem()
+	if ft.Out(0) != errorType {
+		return fmt.Errorf("expected error return, got: %v", ft)
+	}
+	return nil
+}
+
+// RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (c *Converter) RegisterUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error {
+	return c.conversionFuncs.AddUntyped(a, b, fn)
+}
+
+// RegisterGeneratedUntypedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error {
+	return c.generatedConversionFuncs.AddUntyped(a, b, fn)
+}
+
+// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested
+// conversion between from and to is ignored.
+func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
+	typeFrom := reflect.TypeOf(from)
+	typeTo := reflect.TypeOf(to)
+	if reflect.TypeOf(from).Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom)
+	}
+	if typeTo.Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo)
+	}
+	c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{}
+	c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{}
+	return nil
+}
+
+// RegisterInputDefaults registers a field name mapping function, used when converting
+// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping
+// applied automatically if the input matches in. A set of default flags for the input conversion
+// may also be provided, which will be used when no explicit flags are requested.
+func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error {
+	fv := reflect.ValueOf(in)
+	ft := fv.Type()
+	if ft.Kind() != reflect.Ptr {
+		return fmt.Errorf("expected pointer 'in' argument, got: %v", ft)
+	}
+	c.inputFieldMappingFuncs[ft] = fn
+	c.inputDefaultFlags[ft] = defaultFlags
+	return nil
+}
+
+// FieldMatchingFlags contains a list of ways in which struct fields could be
+// copied. These constants may be | combined.
+type FieldMatchingFlags int
+
+const (
+	// Loop through destination fields, search for matching source
+	// field to copy it from. Source fields with no corresponding
+	// destination field will be ignored. If SourceToDest is
+	// specified, this flag is ignored. If neither is specified,
+	// or no flags are passed, this flag is the default.
+	DestFromSource FieldMatchingFlags = 0
+	// Loop through source fields, search for matching dest field
+	// to copy it into. Destination fields with no corresponding
+	// source field will be ignored.
+	SourceToDest FieldMatchingFlags = 1 << iota
+	// Don't treat it as an error if the corresponding source or
+	// dest field can't be found.
+	IgnoreMissingFields
+	// Don't require type names to match.
+	AllowDifferentFieldTypeNames
+)
+
+// IsSet returns true if the given flag or combination of flags is set.
+func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool {
+	if flag == DestFromSource {
+		// The bit logic doesn't work on the default value.
+		return f&SourceToDest != SourceToDest
+	}
+	return f&flag == flag
+}
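+
+// Illustrative only (not upstream code): how the flags combine and what IsSet
+// reports for them.
+//
+//	flags := SourceToDest | IgnoreMissingFields
+//	flags.IsSet(IgnoreMissingFields)            // true
+//	flags.IsSet(DestFromSource)                 // false, because SourceToDest is set
+//	FieldMatchingFlags(0).IsSet(DestFromSource) // true, DestFromSource is the default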
+
+// Convert will translate src to dest if it knows how. Both must be pointers.
+// If no conversion func is registered and the default copying mechanism
+// doesn't work on this type pair, an error will be returned.
+// Read the comments on the various FieldMatchingFlags constants to understand
+// what the 'flags' parameter does.
+// 'meta' is given to allow you to pass information to conversion functions,
+// it is not used by Convert() other than storing it in the scope.
+// Not safe for objects with cyclic references!
+func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
+	return c.doConversion(src, dest, flags, meta, c.convert)
+}
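+
+// A hedged usage sketch; widgetV1 and widgetInternal are hypothetical types,
+// not defined anywhere in this package. It registers an untyped conversion
+// function and then drives it through Convert.
+//
+//	c := NewConverter(DefaultNameFunc)
+//	_ = c.RegisterUntypedConversionFunc(
+//		(*widgetV1)(nil), (*widgetInternal)(nil),
+//		func(a, b interface{}, s Scope) error {
+//			in, out := a.(*widgetV1), b.(*widgetInternal)
+//			out.Name = in.Name
+//			return nil
+//		},
+//	)
+//	err := c.Convert(&widgetV1{Name: "x"}, &widgetInternal{}, 0, nil)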
+
+type conversionFunc func(sv, dv reflect.Value, scope *scope) error
+
+func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error {
+	pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)}
+	scope := &scope{
+		converter: c,
+		flags:     flags,
+		meta:      meta,
+	}
+
+	// ignore conversions of this type
+	if _, ok := c.ignoredUntypedConversions[pair]; ok {
+		return nil
+	}
+	if fn, ok := c.conversionFuncs.untyped[pair]; ok {
+		return fn(src, dest, scope)
+	}
+	if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok {
+		return fn(src, dest, scope)
+	}
+
+	dv, err := EnforcePtr(dest)
+	if err != nil {
+		return err
+	}
+	sv, err := EnforcePtr(src)
+	if err != nil {
+		return err
+	}
+	return fmt.Errorf("converting (%s) to (%s): unknown conversion", sv.Type(), dv.Type())
+
+	// TODO: Everything past this point is deprecated.
+	//  Remove in 1.20 once we're sure it didn't break anything.
+
+	// Leave something on the stack, so that calls to struct tag getters never fail.
+	scope.srcStack.push(scopeStackElem{})
+	scope.destStack.push(scopeStackElem{})
+	return f(sv, dv, scope)
+}
+
+// callUntyped calls a predefined conversion function.
+func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error {
+	if !dv.CanAddr() {
+		return scope.errorf("can't addr dest")
+	}
+	var svPointer reflect.Value
+	if sv.CanAddr() {
+		svPointer = sv.Addr()
+	} else {
+		svPointer = reflect.New(sv.Type())
+		svPointer.Elem().Set(sv)
+	}
+	dvPointer := dv.Addr()
+	return f(svPointer.Interface(), dvPointer.Interface(), scope)
+}
+
+// convert recursively copies sv into dv, calling an appropriate conversion function if
+// one is registered.
+func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error {
+	dt, st := dv.Type(), sv.Type()
+	pair := typePair{st, dt}
+
+	// ignore conversions of this type
+	if _, ok := c.ignoredConversions[pair]; ok {
+		if c.Debug != nil {
+			c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt)
+		}
+		return nil
+	}
+
+	// Convert sv to dv.
+	pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())}
+	if f, ok := c.conversionFuncs.untyped[pair]; ok {
+		return c.callUntyped(sv, dv, f, scope)
+	}
+	if f, ok := c.generatedConversionFuncs.untyped[pair]; ok {
+		return c.callUntyped(sv, dv, f, scope)
+	}
+
+	if !dv.CanSet() {
+		return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)")
+	}
+
+	if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) {
+		return scope.errorf(
+			"type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.",
+			c.nameFunc(st), c.nameFunc(dt), st, dt)
+	}
+
+	switch st.Kind() {
+	case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct:
+		// Don't copy these via assignment/conversion!
+	default:
+		// This should handle all simple types.
+		if st.AssignableTo(dt) {
+			dv.Set(sv)
+			return nil
+		}
+		if st.ConvertibleTo(dt) {
+			dv.Set(sv.Convert(dt))
+			return nil
+		}
+	}
+
+	if c.Debug != nil {
+		c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt)
+	}
+
+	scope.srcStack.push(scopeStackElem{value: sv})
+	scope.destStack.push(scopeStackElem{value: dv})
+	defer scope.srcStack.pop()
+	defer scope.destStack.pop()
+
+	switch dv.Kind() {
+	case reflect.Struct:
+		return c.convertKV(toKVValue(sv), toKVValue(dv), scope)
+	case reflect.Slice:
+		if sv.IsNil() {
+			// Don't make a zero-length slice.
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))
+		for i := 0; i < sv.Len(); i++ {
+			scope.setIndices(i, i)
+			if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil {
+				return err
+			}
+		}
+	case reflect.Ptr:
+		if sv.IsNil() {
+			// Don't copy a nil ptr!
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		dv.Set(reflect.New(dt.Elem()))
+		switch st.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			return c.convert(sv.Elem(), dv.Elem(), scope)
+		default:
+			return c.convert(sv, dv.Elem(), scope)
+		}
+	case reflect.Map:
+		if sv.IsNil() {
+			// Don't copy a nil ptr!
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		dv.Set(reflect.MakeMap(dt))
+		for _, sk := range sv.MapKeys() {
+			dk := reflect.New(dt.Key()).Elem()
+			if err := c.convert(sk, dk, scope); err != nil {
+				return err
+			}
+			dkv := reflect.New(dt.Elem()).Elem()
+			scope.setKeys(sk.Interface(), dk.Interface())
+			// TODO:  sv.MapIndex(sk) may return a value with CanAddr() == false,
+			// because a map[string]struct{} does not allow a pointer reference.
+			// Calling a custom conversion function defined for the map value
+			// will panic. Example is PodInfo map[string]ContainerStatus.
+			if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil {
+				return err
+			}
+			dv.SetMapIndex(dk, dkv)
+		}
+	case reflect.Interface:
+		if sv.IsNil() {
+			// Don't copy a nil interface!
+			dv.Set(reflect.Zero(dt))
+			return nil
+		}
+		tmpdv := reflect.New(sv.Elem().Type()).Elem()
+		if err := c.convert(sv.Elem(), tmpdv, scope); err != nil {
+			return err
+		}
+		dv.Set(reflect.ValueOf(tmpdv.Interface()))
+		return nil
+	default:
+		return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
+	}
+	return nil
+}
+
+var stringType = reflect.TypeOf("")
+
+func toKVValue(v reflect.Value) kvValue {
+	switch v.Kind() {
+	case reflect.Struct:
+		return structAdaptor(v)
+	case reflect.Map:
+		if v.Type().Key().AssignableTo(stringType) {
+			return stringMapAdaptor(v)
+		}
+	}
+
+	return nil
+}
+
+// kvValue lets us write the same conversion logic to work with both maps
+// and structs. Only maps with string keys make sense for this.
+type kvValue interface {
+	// returns all keys, as a []string.
+	keys() []string
+	// Will just return "" for maps.
+	tagOf(key string) reflect.StructTag
+	// Will return the zero Value if the key doesn't exist.
+	value(key string) reflect.Value
+	// Maps require explicit setting-- will do nothing for structs.
+	// Returns false on failure.
+	confirmSet(key string, v reflect.Value) bool
+}
+
+type stringMapAdaptor reflect.Value
+
+func (a stringMapAdaptor) len() int {
+	return reflect.Value(a).Len()
+}
+
+func (a stringMapAdaptor) keys() []string {
+	v := reflect.Value(a)
+	keys := make([]string, v.Len())
+	for i, v := range v.MapKeys() {
+		if v.IsNil() {
+			continue
+		}
+		switch t := v.Interface().(type) {
+		case string:
+			keys[i] = t
+		}
+	}
+	return keys
+}
+
+func (a stringMapAdaptor) tagOf(key string) reflect.StructTag {
+	return ""
+}
+
+func (a stringMapAdaptor) value(key string) reflect.Value {
+	return reflect.Value(a).MapIndex(reflect.ValueOf(key))
+}
+
+func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool {
+	return true
+}
+
+type structAdaptor reflect.Value
+
+func (a structAdaptor) len() int {
+	v := reflect.Value(a)
+	return v.Type().NumField()
+}
+
+func (a structAdaptor) keys() []string {
+	v := reflect.Value(a)
+	t := v.Type()
+	keys := make([]string, t.NumField())
+	for i := range keys {
+		keys[i] = t.Field(i).Name
+	}
+	return keys
+}
+
+func (a structAdaptor) tagOf(key string) reflect.StructTag {
+	v := reflect.Value(a)
+	field, ok := v.Type().FieldByName(key)
+	if ok {
+		return field.Tag
+	}
+	return ""
+}
+
+func (a structAdaptor) value(key string) reflect.Value {
+	v := reflect.Value(a)
+	return v.FieldByName(key)
+}
+
+func (a structAdaptor) confirmSet(key string, v reflect.Value) bool {
+	return true
+}
+
+// convertKV can convert things that consist of key/value pairs, like structs
+// and some maps.
+func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error {
+	if skv == nil || dkv == nil {
+		// TODO: add keys to stack to support really understandable error messages.
+		return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv)
+	}
+
+	lister := dkv
+	if scope.flags.IsSet(SourceToDest) {
+		lister = skv
+	}
+
+	var mapping FieldMappingFunc
+	if scope.meta != nil && scope.meta.KeyNameMapping != nil {
+		mapping = scope.meta.KeyNameMapping
+	}
+
+	for _, key := range lister.keys() {
+		if found, err := c.checkField(key, skv, dkv, scope); found {
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		stag := skv.tagOf(key)
+		dtag := dkv.tagOf(key)
+		skey := key
+		dkey := key
+		if mapping != nil {
+			skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag)
+		}
+
+		df := dkv.value(dkey)
+		sf := skv.value(skey)
+		if !df.IsValid() || !sf.IsValid() {
+			switch {
+			case scope.flags.IsSet(IgnoreMissingFields):
+				// No error.
+			case scope.flags.IsSet(SourceToDest):
+				return scope.errorf("%v not present in dest", dkey)
+			default:
+				return scope.errorf("%v not present in src", skey)
+			}
+			continue
+		}
+		scope.srcStack.top().key = skey
+		scope.srcStack.top().tag = stag
+		scope.destStack.top().key = dkey
+		scope.destStack.top().tag = dtag
+		if err := c.convert(sf, df, scope); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// checkField returns true if the field name matches any of the struct
+// field copying rules. The error should be ignored if it returns false.
+func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) {
+	replacementMade := false
+	if scope.flags.IsSet(DestFromSource) {
+		df := dkv.value(fieldName)
+		if !df.IsValid() {
+			return false, nil
+		}
+		destKey := typeNamePair{df.Type(), fieldName}
+		// Check each of the potential source (type, name) pairs to see if they're
+		// present in sv.
+		for _, potentialSourceKey := range c.structFieldSources[destKey] {
+			sf := skv.value(potentialSourceKey.fieldName)
+			if !sf.IsValid() {
+				continue
+			}
+			if sf.Type() == potentialSourceKey.fieldType {
+				// Both the source's name and type matched, so copy.
+				scope.srcStack.top().key = potentialSourceKey.fieldName
+				scope.destStack.top().key = fieldName
+				if err := c.convert(sf, df, scope); err != nil {
+					return true, err
+				}
+				dkv.confirmSet(fieldName, df)
+				replacementMade = true
+			}
+		}
+		return replacementMade, nil
+	}
+
+	sf := skv.value(fieldName)
+	if !sf.IsValid() {
+		return false, nil
+	}
+	srcKey := typeNamePair{sf.Type(), fieldName}
+	// Check each of the potential dest (type, name) pairs to see if they're
+	// present in dv.
+	for _, potentialDestKey := range c.structFieldDests[srcKey] {
+		df := dkv.value(potentialDestKey.fieldName)
+		if !df.IsValid() {
+			continue
+		}
+		if df.Type() == potentialDestKey.fieldType {
+			// Both the dest's name and type matched, so copy.
+			scope.srcStack.top().key = fieldName
+			scope.destStack.top().key = potentialDestKey.fieldName
+			if err := c.convert(sf, df, scope); err != nil {
+				return true, err
+			}
+			dkv.confirmSet(potentialDestKey.fieldName, df)
+			replacementMade = true
+		}
+	}
+	return replacementMade, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go
new file mode 100644
index 0000000..f21abe1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+	"k8s.io/apimachinery/third_party/forked/golang/reflect"
+)
+
+// The code for this type must be located in third_party, since it forks from
+// go std lib. But for convenience, we expose the type here, too.
+type Equalities struct {
+	reflect.Equalities
+}
+
+// For convenience, panics on errors
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+	e := Equalities{reflect.Equalities{}}
+	if err := e.AddFuncs(funcs...); err != nil {
+		panic(err)
+	}
+	return e
+}
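+
+// A minimal, illustrative sketch (myType and the compared objects are
+// hypothetical): callers seed Equalities with comparison functions of the form
+// func(a, b T) bool; the embedded reflect.Equalities then consults them while
+// comparing.
+//
+//	e := EqualitiesOrDie(func(a, b myType) bool { return a.id == b.id })
+//	equal := e.DeepEqual(objA, objB) // uses the custom function whenever it reaches myType values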
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
new file mode 100644
index 0000000..7415d81
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package conversion provides go object versioning.
+//
+// Specifically, conversion provides a way for you to define multiple versions
+// of the same object. You may write functions which implement conversion logic,
+// but for the fields which did not change, copying is automated. This makes it
+// easy to modify the structures you use in memory without affecting the format
+// you store on disk or respond to in your external API calls.
+package conversion // import "k8s.io/apimachinery/pkg/conversion"
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go
new file mode 100644
index 0000000..4ebc1eb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package conversion
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value
+// of the dereferenced pointer, ensuring that it is settable/addressable.
+// Returns an error if this is not possible.
+func EnforcePtr(obj interface{}) (reflect.Value, error) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		if v.Kind() == reflect.Invalid {
+			return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind")
+		}
+		return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type())
+	}
+	if v.IsNil() {
+		return reflect.Value{}, fmt.Errorf("expected pointer, but got nil")
+	}
+	return v.Elem(), nil
+}
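+
+// Illustrative behaviour (myStruct is a hypothetical type):
+//
+//	v, err := EnforcePtr(&myStruct{}) // v is the addressable myStruct value, err is nil
+//	_, err = EnforcePtr(myStruct{})   // error: expected pointer, but got ... type
+//	_, err = EnforcePtr(nil)          // error: expected pointer, but got invalid kind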
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
new file mode 100644
index 0000000..2f0dd00
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package queryparams
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+)
+
+// Marshaler converts an object to a query parameter string representation
+type Marshaler interface {
+	MarshalQueryParameter() (string, error)
+}
+
+// Unmarshaler converts a string representation to an object
+type Unmarshaler interface {
+	UnmarshalQueryParameter(string) error
+}
+
+func jsonTag(field reflect.StructField) (string, bool) {
+	structTag := field.Tag.Get("json")
+	if len(structTag) == 0 {
+		return "", false
+	}
+	parts := strings.Split(structTag, ",")
+	tag := parts[0]
+	if tag == "-" {
+		tag = ""
+	}
+	omitempty := false
+	parts = parts[1:]
+	for _, part := range parts {
+		if part == "omitempty" {
+			omitempty = true
+			break
+		}
+	}
+	return tag, omitempty
+}
+
+func isPointerKind(kind reflect.Kind) bool {
+	return kind == reflect.Ptr
+}
+
+func isStructKind(kind reflect.Kind) bool {
+	return kind == reflect.Struct
+}
+
+func isValueKind(kind reflect.Kind) bool {
+	switch kind {
+	case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16,
+		reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8,
+		reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32,
+		reflect.Float64, reflect.Complex64, reflect.Complex128:
+		return true
+	default:
+		return false
+	}
+}
+
+func zeroValue(value reflect.Value) bool {
+	return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface())
+}
+
+func customMarshalValue(value reflect.Value) (reflect.Value, bool) {
+	// Return unless we implement a custom query marshaler
+	if !value.CanInterface() {
+		return reflect.Value{}, false
+	}
+
+	marshaler, ok := value.Interface().(Marshaler)
+	if !ok {
+		if !isPointerKind(value.Kind()) && value.CanAddr() {
+			marshaler, ok = value.Addr().Interface().(Marshaler)
+			if !ok {
+				return reflect.Value{}, false
+			}
+		} else {
+			return reflect.Value{}, false
+		}
+	}
+
+	// Don't invoke functions on nil pointers
+	// If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response
+	if isPointerKind(value.Kind()) && zeroValue(value) {
+		return reflect.ValueOf(""), true
+	}
+
+	// Get the custom marshalled value
+	v, err := marshaler.MarshalQueryParameter()
+	if err != nil {
+		return reflect.Value{}, false
+	}
+	return reflect.ValueOf(v), true
+}
+
+func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) {
+	if omitempty && zeroValue(value) {
+		return
+	}
+	val := ""
+	iValue := fmt.Sprintf("%v", value.Interface())
+
+	if iValue != "<nil>" {
+		val = iValue
+	}
+	values.Add(tag, val)
+}
+
+func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) {
+	for i := 0; i < list.Len(); i++ {
+		addParam(values, tag, omitempty, list.Index(i))
+	}
+}
+
+// Convert takes an object and converts it to a url.Values object using JSON tags as
+// parameter names. Only top-level simple values, arrays, and slices are serialized.
+// Embedded structs, maps, etc. will not be serialized.
+func Convert(obj interface{}) (url.Values, error) {
+	result := url.Values{}
+	if obj == nil {
+		return result, nil
+	}
+	var sv reflect.Value
+	switch reflect.TypeOf(obj).Kind() {
+	case reflect.Ptr, reflect.Interface:
+		sv = reflect.ValueOf(obj).Elem()
+	default:
+		return nil, fmt.Errorf("expecting a pointer or interface")
+	}
+	st := sv.Type()
+	if !isStructKind(st.Kind()) {
+		return nil, fmt.Errorf("expecting a pointer to a struct")
+	}
+
+	// Check all object fields
+	convertStruct(result, st, sv)
+
+	return result, nil
+}
+
+func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) {
+	for i := 0; i < st.NumField(); i++ {
+		field := sv.Field(i)
+		tag, omitempty := jsonTag(st.Field(i))
+		if len(tag) == 0 {
+			continue
+		}
+		ft := field.Type()
+
+		kind := ft.Kind()
+		if isPointerKind(kind) {
+			ft = ft.Elem()
+			kind = ft.Kind()
+			if !field.IsNil() {
+				field = reflect.Indirect(field)
+				// If the field is non-nil, it should be added to params
+				// and the omitempty should be overwritten to false
+				omitempty = false
+			}
+		}
+
+		switch {
+		case isValueKind(kind):
+			addParam(result, tag, omitempty, field)
+		case kind == reflect.Array || kind == reflect.Slice:
+			if isValueKind(ft.Elem().Kind()) {
+				addListOfParams(result, tag, omitempty, field)
+			}
+		case isStructKind(kind) && !(zeroValue(field) && omitempty):
+			if marshalValue, ok := customMarshalValue(field); ok {
+				addParam(result, tag, omitempty, marshalValue)
+			} else {
+				convertStruct(result, ft, field)
+			}
+		}
+	}
+}
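
For reviewers unfamiliar with this vendored helper, the following is a small illustrative sketch (not part of the diff) of how queryparams.Convert is typically used; the Options struct and its tag names are hypothetical, chosen only to show the json-tag and omitempty handling implemented above.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion/queryparams"
)

// Options is a hypothetical struct; Convert reads its json tags.
type Options struct {
	Limit    int64  `json:"limit,omitempty"`
	Continue string `json:"continue,omitempty"`
	Watch    bool   `json:"watch,omitempty"`
}

func main() {
	// Convert requires a pointer to a struct and serializes top-level
	// simple values under their json tag names.
	values, err := queryparams.Convert(&Options{Limit: 500, Watch: true})
	if err != nil {
		panic(err)
	}
	// omitempty drops the zero-valued Continue field.
	fmt.Println(values.Encode()) // limit=500&watch=true
}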
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
new file mode 100644
index 0000000..7b763de
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package queryparams provides conversion from versioned
+// runtime objects to URL query values
+package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams"
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
new file mode 100644
index 0000000..c39b803
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fields implements a simple field system, parsing and matching
+// selectors with sets of fields.
+package fields // import "k8s.io/apimachinery/pkg/fields"
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go
new file mode 100644
index 0000000..623b27e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/fields.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import (
+	"sort"
+	"strings"
+)
+
+// Fields allows you to present fields independently from their storage.
+type Fields interface {
+	// Has returns whether the provided field exists.
+	Has(field string) (exists bool)
+
+	// Get returns the value for the provided field.
+	Get(field string) (value string)
+}
+
+// Set is a map of field:value. It implements Fields.
+type Set map[string]string
+
+// String returns all fields listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+	selector := make([]string, 0, len(ls))
+	for key, value := range ls {
+		selector = append(selector, key+"="+value)
+	}
+	// Sort for determinism.
+	sort.StringSlice(selector).Sort()
+	return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided field exists in the map.
+func (ls Set) Has(field string) bool {
+	_, exists := ls[field]
+	return exists
+}
+
+// Get returns the value in the map for the provided field.
+func (ls Set) Get(field string) string {
+	return ls[field]
+}
+
+// AsSelector converts fields into a selector.
+func (ls Set) AsSelector() Selector {
+	return SelectorFromSet(ls)
+}
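
A minimal sketch (illustrative only, not part of the diff) of the Set type defined above; the field keys and values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	set := fields.Set{"metadata.name": "bbsim0", "status.phase": "Running"}
	fmt.Println(set.String())             // metadata.name=bbsim0,status.phase=Running
	fmt.Println(set.Has("metadata.name")) // true
	fmt.Println(set.Get("status.phase"))  // Running

	// AsSelector builds a Selector that matches exactly this Set.
	fmt.Println(set.AsSelector().Matches(set)) // true
}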
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go
new file mode 100644
index 0000000..70d94de
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import "k8s.io/apimachinery/pkg/selection"
+
+// Requirements is AND of all requirements.
+type Requirements []Requirement
+
+// Requirement contains a field, a value, and an operator that relates the field and value.
+// This is currently for reading internal selection information of field selector.
+type Requirement struct {
+	Operator selection.Operator
+	Field    string
+	Value    string
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
new file mode 100644
index 0000000..a9e2049
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go
@@ -0,0 +1,478 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fields
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/selection"
+)
+
+// Selector represents a field selector.
+type Selector interface {
+	// Matches returns true if this selector matches the given set of fields.
+	Matches(Fields) bool
+
+	// Empty returns true if this selector does not restrict the selection space.
+	Empty() bool
+
+	// RequiresExactMatch allows a caller to introspect whether a given selector
+	// requires a single specific field to be set, and if so returns the value it
+	// requires.
+	RequiresExactMatch(field string) (value string, found bool)
+
+	// Transform returns a new copy of the selector after TransformFunc has been
+	// applied to the entire selector, or an error if fn returns an error.
+	// If for a given requirement both field and value are transformed to empty
+	// string, the requirement is skipped.
+	Transform(fn TransformFunc) (Selector, error)
+
+	// Requirements converts this interface to Requirements to expose
+	// more detailed selection information.
+	Requirements() Requirements
+
+	// String returns a human readable string that represents this selector.
+	String() string
+
+	// Make a deep copy of the selector.
+	DeepCopySelector() Selector
+}
+
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Fields) bool      { return false }
+func (n nothingSelector) Empty() bool                { return false }
+func (n nothingSelector) String() string             { return "" }
+func (n nothingSelector) Requirements() Requirements { return nil }
+func (n nothingSelector) DeepCopySelector() Selector { return n }
+func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) {
+	return "", false
+}
+func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil }
+
+// Nothing returns a selector that matches no fields
+func Nothing() Selector {
+	return nothingSelector{}
+}
+
+// Everything returns a selector that matches all fields.
+func Everything() Selector {
+	return andTerm{}
+}
+
+type hasTerm struct {
+	field, value string
+}
+
+func (t *hasTerm) Matches(ls Fields) bool {
+	return ls.Get(t.field) == t.value
+}
+
+func (t *hasTerm) Empty() bool {
+	return false
+}
+
+func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) {
+	if t.field == field {
+		return t.value, true
+	}
+	return "", false
+}
+
+func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) {
+	field, value, err := fn(t.field, t.value)
+	if err != nil {
+		return nil, err
+	}
+	if len(field) == 0 && len(value) == 0 {
+		return Everything(), nil
+	}
+	return &hasTerm{field, value}, nil
+}
+
+func (t *hasTerm) Requirements() Requirements {
+	return []Requirement{{
+		Field:    t.field,
+		Operator: selection.Equals,
+		Value:    t.value,
+	}}
+}
+
+func (t *hasTerm) String() string {
+	return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *hasTerm) DeepCopySelector() Selector {
+	if t == nil {
+		return nil
+	}
+	out := new(hasTerm)
+	*out = *t
+	return out
+}
+
+type notHasTerm struct {
+	field, value string
+}
+
+func (t *notHasTerm) Matches(ls Fields) bool {
+	return ls.Get(t.field) != t.value
+}
+
+func (t *notHasTerm) Empty() bool {
+	return false
+}
+
+func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) {
+	return "", false
+}
+
+func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) {
+	field, value, err := fn(t.field, t.value)
+	if err != nil {
+		return nil, err
+	}
+	if len(field) == 0 && len(value) == 0 {
+		return Everything(), nil
+	}
+	return &notHasTerm{field, value}, nil
+}
+
+func (t *notHasTerm) Requirements() Requirements {
+	return []Requirement{{
+		Field:    t.field,
+		Operator: selection.NotEquals,
+		Value:    t.value,
+	}}
+}
+
+func (t *notHasTerm) String() string {
+	return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *notHasTerm) DeepCopySelector() Selector {
+	if t == nil {
+		return nil
+	}
+	out := new(notHasTerm)
+	*out = *t
+	return out
+}
+
+type andTerm []Selector
+
+func (t andTerm) Matches(ls Fields) bool {
+	for _, q := range t {
+		if !q.Matches(ls) {
+			return false
+		}
+	}
+	return true
+}
+
+func (t andTerm) Empty() bool {
+	if t == nil {
+		return true
+	}
+	if len([]Selector(t)) == 0 {
+		return true
+	}
+	for i := range t {
+		if !t[i].Empty() {
+			return false
+		}
+	}
+	return true
+}
+
+func (t andTerm) RequiresExactMatch(field string) (string, bool) {
+	if t == nil || len([]Selector(t)) == 0 {
+		return "", false
+	}
+	for i := range t {
+		if value, found := t[i].RequiresExactMatch(field); found {
+			return value, found
+		}
+	}
+	return "", false
+}
+
+func (t andTerm) Transform(fn TransformFunc) (Selector, error) {
+	next := make([]Selector, 0, len([]Selector(t)))
+	for _, s := range []Selector(t) {
+		n, err := s.Transform(fn)
+		if err != nil {
+			return nil, err
+		}
+		if !n.Empty() {
+			next = append(next, n)
+		}
+	}
+	return andTerm(next), nil
+}
+
+func (t andTerm) Requirements() Requirements {
+	reqs := make([]Requirement, 0, len(t))
+	for _, s := range []Selector(t) {
+		rs := s.Requirements()
+		reqs = append(reqs, rs...)
+	}
+	return reqs
+}
+
+func (t andTerm) String() string {
+	var terms []string
+	for _, q := range t {
+		terms = append(terms, q.String())
+	}
+	return strings.Join(terms, ",")
+}
+
+func (t andTerm) DeepCopySelector() Selector {
+	if t == nil {
+		return nil
+	}
+	out := make([]Selector, len(t))
+	for i := range t {
+		out[i] = t[i].DeepCopySelector()
+	}
+	return andTerm(out)
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set. A
+// nil Set is considered equivalent to Everything().
+func SelectorFromSet(ls Set) Selector {
+	if ls == nil {
+		return Everything()
+	}
+	items := make([]Selector, 0, len(ls))
+	for field, value := range ls {
+		items = append(items, &hasTerm{field: field, value: value})
+	}
+	if len(items) == 1 {
+		return items[0]
+	}
+	return andTerm(items)
+}
+
+// valueEscaper prefixes \,= characters with a backslash
+var valueEscaper = strings.NewReplacer(
+	// escape \ characters
+	`\`, `\\`,
+	// then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector
+	`,`, `\,`,
+	`=`, `\=`,
+)
+
+// EscapeValue escapes an arbitrary literal string for use as a fieldSelector value
+func EscapeValue(s string) string {
+	return valueEscaper.Replace(s)
+}
+
+// InvalidEscapeSequence indicates an error occurred unescaping a field selector
+type InvalidEscapeSequence struct {
+	sequence string
+}
+
+func (i InvalidEscapeSequence) Error() string {
+	return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence)
+}
+
+// UnescapedRune indicates an error occurred unescaping a field selector
+type UnescapedRune struct {
+	r rune
+}
+
+func (i UnescapedRune) Error() string {
+	return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r)
+}
+
+// UnescapeValue unescapes a fieldSelector value and returns the original literal value.
+// May return the original string if it contains no escaped or special characters.
+func UnescapeValue(s string) (string, error) {
+	// if there's no escaping or special characters, just return to avoid allocation
+	if !strings.ContainsAny(s, `\,=`) {
+		return s, nil
+	}
+
+	v := bytes.NewBuffer(make([]byte, 0, len(s)))
+	inSlash := false
+	for _, c := range s {
+		if inSlash {
+			switch c {
+			case '\\', ',', '=':
+				// omit the \ for recognized escape sequences
+				v.WriteRune(c)
+			default:
+				// error on unrecognized escape sequences
+				return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})}
+			}
+			inSlash = false
+			continue
+		}
+
+		switch c {
+		case '\\':
+			inSlash = true
+		case ',', '=':
+			// unescaped , and = characters are not allowed in field selector values
+			return "", UnescapedRune{r: c}
+		default:
+			v.WriteRune(c)
+		}
+	}
+
+	// Ending with a single backslash is an invalid sequence
+	if inSlash {
+		return "", InvalidEscapeSequence{sequence: "\\"}
+	}
+
+	return v.String(), nil
+}
+
+// ParseSelectorOrDie takes a string representing a selector and returns an
+// object suitable for matching, or panics when an error occurs.
+func ParseSelectorOrDie(s string) Selector {
+	selector, err := ParseSelector(s)
+	if err != nil {
+		panic(err)
+	}
+	return selector
+}
+
+// ParseSelector takes a string representing a selector and returns an
+// object suitable for matching, or an error.
+func ParseSelector(selector string) (Selector, error) {
+	return parseSelector(selector,
+		func(lhs, rhs string) (newLhs, newRhs string, err error) {
+			return lhs, rhs, nil
+		})
+}
+
+// ParseAndTransformSelector parses the selector and runs them through the given TransformFunc.
+func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) {
+	return parseSelector(selector, fn)
+}
+
+// TransformFunc transforms selectors.
+type TransformFunc func(field, value string) (newField, newValue string, err error)
+
+// splitTerms returns the comma-separated terms contained in the given fieldSelector.
+// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved.
+func splitTerms(fieldSelector string) []string {
+	if len(fieldSelector) == 0 {
+		return nil
+	}
+
+	terms := make([]string, 0, 1)
+	startIndex := 0
+	inSlash := false
+	for i, c := range fieldSelector {
+		switch {
+		case inSlash:
+			inSlash = false
+		case c == '\\':
+			inSlash = true
+		case c == ',':
+			terms = append(terms, fieldSelector[startIndex:i])
+			startIndex = i + 1
+		}
+	}
+
+	terms = append(terms, fieldSelector[startIndex:])
+
+	return terms
+}
+
+const (
+	notEqualOperator    = "!="
+	doubleEqualOperator = "=="
+	equalOperator       = "="
+)
+
+// termOperators holds the recognized operators supported in fieldSelectors.
+// doubleEqualOperator and equalOperator are equivalent, but doubleEqualOperator is checked first
+// to avoid leaving a leading = character on the rhs value.
+var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator}
+
+// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful.
+// no escaping of special characters is supported in the lhs value, so the first occurrence of a recognized operator is used as the split point.
+// the literal rhs is returned, and the caller is responsible for applying any desired unescaping.
+func splitTerm(term string) (lhs, op, rhs string, ok bool) {
+	for i := range term {
+		remaining := term[i:]
+		for _, op := range termOperators {
+			if strings.HasPrefix(remaining, op) {
+				return term[0:i], op, term[i+len(op):], true
+			}
+		}
+	}
+	return "", "", "", false
+}
+
+func parseSelector(selector string, fn TransformFunc) (Selector, error) {
+	parts := splitTerms(selector)
+	sort.StringSlice(parts).Sort()
+	var items []Selector
+	for _, part := range parts {
+		if part == "" {
+			continue
+		}
+		lhs, op, rhs, ok := splitTerm(part)
+		if !ok {
+			return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+		}
+		unescapedRHS, err := UnescapeValue(rhs)
+		if err != nil {
+			return nil, err
+		}
+		switch op {
+		case notEqualOperator:
+			items = append(items, &notHasTerm{field: lhs, value: unescapedRHS})
+		case doubleEqualOperator:
+			items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+		case equalOperator:
+			items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+		default:
+			return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+		}
+	}
+	if len(items) == 1 {
+		return items[0].Transform(fn)
+	}
+	return andTerm(items).Transform(fn)
+}
+
+// OneTermEqualSelector returns an object that matches objects where one field equals one value.
+// Cannot return an error.
+func OneTermEqualSelector(k, v string) Selector {
+	return &hasTerm{field: k, value: v}
+}
+
+// OneTermNotEqualSelector returns an object that matches objects where one field does not equal one value.
+// Cannot return an error.
+func OneTermNotEqualSelector(k, v string) Selector {
+	return &notHasTerm{field: k, value: v}
+}
+
+// AndSelectors creates a selector that is the logical AND of all the given selectors
+func AndSelectors(selectors ...Selector) Selector {
+	return andTerm(selectors)
+}
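
To make the field selector API above easier to review, here is a short usage sketch (not part of the diff); the field names and values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	// ParseSelector accepts comma-separated =, == and != terms.
	sel, err := fields.ParseSelector("metadata.namespace==voltha,status.phase!=Failed")
	if err != nil {
		panic(err)
	}

	pod := fields.Set{"metadata.namespace": "voltha", "status.phase": "Running"}
	fmt.Println(sel.Matches(pod)) // true

	// OneTermEqualSelector builds a single equality term directly.
	fmt.Println(fields.OneTermEqualSelector("metadata.namespace", "voltha").Matches(pod)) // true
}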
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
new file mode 100644
index 0000000..82de005
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package labels implements a simple label system, parsing and matching
+// selectors with sets of labels.
+package labels // import "k8s.io/apimachinery/pkg/labels"
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
new file mode 100644
index 0000000..d9eeb4f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Labels allows you to present labels independently from their storage.
+type Labels interface {
+	// Has returns whether the provided label exists.
+	Has(label string) (exists bool)
+
+	// Get returns the value for the provided label.
+	Get(label string) (value string)
+}
+
+// Set is a map of label:value. It implements Labels.
+type Set map[string]string
+
+// String returns all labels listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+	selector := make([]string, 0, len(ls))
+	for key, value := range ls {
+		selector = append(selector, key+"="+value)
+	}
+	// Sort for determinism.
+	sort.StringSlice(selector).Sort()
+	return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided label exists in the map.
+func (ls Set) Has(label string) bool {
+	_, exists := ls[label]
+	return exists
+}
+
+// Get returns the value in the map for the provided label.
+func (ls Set) Get(label string) string {
+	return ls[label]
+}
+
+// AsSelector converts labels into a selector. It does not
+// perform any validation, which means the server will reject
+// the request if the Set contains invalid values.
+func (ls Set) AsSelector() Selector {
+	return SelectorFromSet(ls)
+}
+
+// AsValidatedSelector converts labels into a selector.
+// The Set is validated client-side, which allows errors to be caught early.
+func (ls Set) AsValidatedSelector() (Selector, error) {
+	return ValidatedSelectorFromSet(ls)
+}
+
+// AsSelectorPreValidated converts labels into a selector, but
+// assumes that labels are already validated and thus doesn't
+// perform any validation.
+// According to our measurements this is significantly faster
+// in codepaths that matter at high scale.
+func (ls Set) AsSelectorPreValidated() Selector {
+	return SelectorFromValidatedSet(ls)
+}
+
+// FormatLabels converts a label map into a plain string
+func FormatLabels(labelMap map[string]string) string {
+	l := Set(labelMap).String()
+	if l == "" {
+		l = "<none>"
+	}
+	return l
+}
+
+// Conflicts takes 2 maps and returns true if there is a key match between
+// the maps but the values don't match; it returns false in other cases
+func Conflicts(labels1, labels2 Set) bool {
+	small := labels1
+	big := labels2
+	if len(labels2) < len(labels1) {
+		small = labels2
+		big = labels1
+	}
+
+	for k, v := range small {
+		if val, match := big[k]; match {
+			if val != v {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// Merge combines the given maps and does not check for any conflicts
+// between the maps. In case of conflicts, the second map (labels2) wins
+func Merge(labels1, labels2 Set) Set {
+	mergedMap := Set{}
+
+	for k, v := range labels1 {
+		mergedMap[k] = v
+	}
+	for k, v := range labels2 {
+		mergedMap[k] = v
+	}
+	return mergedMap
+}
+
+// Equals returns true if the given maps are equal
+func Equals(labels1, labels2 Set) bool {
+	if len(labels1) != len(labels2) {
+		return false
+	}
+
+	for k, v := range labels1 {
+		value, ok := labels2[k]
+		if !ok {
+			return false
+		}
+		if value != v {
+			return false
+		}
+	}
+	return true
+}
+
+// AreLabelsInWhiteList verifies if the provided label list
+// is in the provided whitelist and returns true, otherwise false.
+func AreLabelsInWhiteList(labels, whitelist Set) bool {
+	if len(whitelist) == 0 {
+		return true
+	}
+
+	for k, v := range labels {
+		value, ok := whitelist[k]
+		if !ok {
+			return false
+		}
+		if value != v {
+			return false
+		}
+	}
+	return true
+}
+
+// ConvertSelectorToLabelsMap converts a selector string to a labels map
+// and validates keys and values
+func ConvertSelectorToLabelsMap(selector string) (Set, error) {
+	labelsMap := Set{}
+
+	if len(selector) == 0 {
+		return labelsMap, nil
+	}
+
+	labels := strings.Split(selector, ",")
+	for _, label := range labels {
+		l := strings.Split(label, "=")
+		if len(l) != 2 {
+			return labelsMap, fmt.Errorf("invalid selector: %s", l)
+		}
+		key := strings.TrimSpace(l[0])
+		if err := validateLabelKey(key); err != nil {
+			return labelsMap, err
+		}
+		value := strings.TrimSpace(l[1])
+		if err := validateLabelValue(key, value); err != nil {
+			return labelsMap, err
+		}
+		labelsMap[key] = value
+	}
+	return labelsMap, nil
+}
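
A short sketch (not part of the diff) of the Set helpers defined above; the label keys and values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	base := labels.Set{"app": "bbsim", "release": "voltha"}
	extra := labels.Set{"release": "master"}

	// Conflicts reports a shared key with different values.
	fmt.Println(labels.Conflicts(base, extra)) // true

	// Merge lets the second map win on conflicting keys.
	merged := labels.Merge(base, extra)
	fmt.Println(labels.FormatLabels(merged)) // app=bbsim,release=master

	// ConvertSelectorToLabelsMap parses and validates "k=v" pairs.
	m, err := labels.ConvertSelectorToLabelsMap("app=bbsim,release=master")
	if err != nil {
		panic(err)
	}
	fmt.Println(labels.Equals(m, merged)) // true
}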
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
new file mode 100644
index 0000000..bf62f98
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -0,0 +1,923 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/klog/v2"
+)
+
+// Requirements is AND of all requirements.
+type Requirements []Requirement
+
+// Selector represents a label selector.
+type Selector interface {
+	// Matches returns true if this selector matches the given set of labels.
+	Matches(Labels) bool
+
+	// Empty returns true if this selector does not restrict the selection space.
+	Empty() bool
+
+	// String returns a human readable string that represents this selector.
+	String() string
+
+	// Add adds requirements to the Selector
+	Add(r ...Requirement) Selector
+
+	// Requirements converts this interface into Requirements to expose
+	// more detailed selection information.
+	// If there are querying parameters, it will return converted requirements and selectable=true.
+	// If this selector doesn't want to select anything, it will return selectable=false.
+	Requirements() (requirements Requirements, selectable bool)
+
+	// Make a deep copy of the selector.
+	DeepCopySelector() Selector
+
+	// RequiresExactMatch allows a caller to introspect whether a given selector
+	// requires a single specific label to be set, and if so returns the value it
+	// requires.
+	RequiresExactMatch(label string) (value string, found bool)
+}
+
+// Everything returns a selector that matches all labels.
+func Everything() Selector {
+	return internalSelector{}
+}
+
+type nothingSelector struct{}
+
+func (n nothingSelector) Matches(_ Labels) bool              { return false }
+func (n nothingSelector) Empty() bool                        { return false }
+func (n nothingSelector) String() string                     { return "" }
+func (n nothingSelector) Add(_ ...Requirement) Selector      { return n }
+func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false }
+func (n nothingSelector) DeepCopySelector() Selector         { return n }
+func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) {
+	return "", false
+}
+
+// Nothing returns a selector that matches no labels
+func Nothing() Selector {
+	return nothingSelector{}
+}
+
+// NewSelector returns a nil selector
+func NewSelector() Selector {
+	return internalSelector(nil)
+}
+
+type internalSelector []Requirement
+
+func (s internalSelector) DeepCopy() internalSelector {
+	if s == nil {
+		return nil
+	}
+	result := make([]Requirement, len(s))
+	for i := range s {
+		s[i].DeepCopyInto(&result[i])
+	}
+	return result
+}
+
+func (s internalSelector) DeepCopySelector() Selector {
+	return s.DeepCopy()
+}
+
+// ByKey sorts requirements by key to obtain deterministic parsing
+type ByKey []Requirement
+
+func (a ByKey) Len() int { return len(a) }
+
+func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+// Requirement contains values, a key, and an operator that relates the key and values.
+// The zero value of Requirement is invalid.
+// Requirement implements both set based match and exact match
+// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement.
+// +k8s:deepcopy-gen=true
+type Requirement struct {
+	key      string
+	operator selection.Operator
+	// In the huge majority of cases we have at most one value here.
+	// It is generally faster to operate on a single-element slice
+	// than on a single-element map, so we have a slice here.
+	strValues []string
+}
+
+// NewRequirement is the constructor for a Requirement.
+// If any of these rules is violated, an error is returned:
+// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist.
+// (2) If the operator is In or NotIn, the values set must be non-empty.
+// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
+// (4) If the operator is Exists or DoesNotExist, the value set must be empty.
+// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
+// (6) The key is invalid due to its length, or sequence
+//     of characters. See validateLabelKey for more details.
+//
+// The empty string is a valid value in the input values set.
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) {
+	if err := validateLabelKey(key); err != nil {
+		return nil, err
+	}
+	switch op {
+	case selection.In, selection.NotIn:
+		if len(vals) == 0 {
+			return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty")
+		}
+	case selection.Equals, selection.DoubleEquals, selection.NotEquals:
+		if len(vals) != 1 {
+			return nil, fmt.Errorf("exact-match compatibility requires one single value")
+		}
+	case selection.Exists, selection.DoesNotExist:
+		if len(vals) != 0 {
+			return nil, fmt.Errorf("values set must be empty for exists and does not exist")
+		}
+	case selection.GreaterThan, selection.LessThan:
+		if len(vals) != 1 {
+			return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required")
+		}
+		for i := range vals {
+			if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil {
+				return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer")
+			}
+		}
+	default:
+		return nil, fmt.Errorf("operator '%v' is not recognized", op)
+	}
+
+	for i := range vals {
+		if err := validateLabelValue(key, vals[i]); err != nil {
+			return nil, err
+		}
+	}
+	return &Requirement{key: key, operator: op, strValues: vals}, nil
+}
+
+func (r *Requirement) hasValue(value string) bool {
+	for i := range r.strValues {
+		if r.strValues[i] == value {
+			return true
+		}
+	}
+	return false
+}
+
+// Matches returns true if the Requirement matches the input Labels.
+// There is a match in the following cases:
+// (1) The operator is Exists and Labels has the Requirement's key.
+// (2) The operator is In, Labels has the Requirement's key and Labels'
+//     value for that key is in Requirement's value set.
+// (3) The operator is NotIn, Labels has the Requirement's key and
+//     Labels' value for that key is not in Requirement's value set.
+// (4) The operator is DoesNotExist or NotIn and Labels does not have the
+//     Requirement's key.
+// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has
+//     the Requirement's key and the corresponding value satisfies mathematical inequality.
+func (r *Requirement) Matches(ls Labels) bool {
+	switch r.operator {
+	case selection.In, selection.Equals, selection.DoubleEquals:
+		if !ls.Has(r.key) {
+			return false
+		}
+		return r.hasValue(ls.Get(r.key))
+	case selection.NotIn, selection.NotEquals:
+		if !ls.Has(r.key) {
+			return true
+		}
+		return !r.hasValue(ls.Get(r.key))
+	case selection.Exists:
+		return ls.Has(r.key)
+	case selection.DoesNotExist:
+		return !ls.Has(r.key)
+	case selection.GreaterThan, selection.LessThan:
+		if !ls.Has(r.key) {
+			return false
+		}
+		lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
+		if err != nil {
+			klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
+			return false
+		}
+
+		// There should be only one strValue in r.strValues, and can be converted to an integer.
+		if len(r.strValues) != 1 {
+			klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
+			return false
+		}
+
+		var rValue int64
+		for i := range r.strValues {
+			rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
+			if err != nil {
+				klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
+				return false
+			}
+		}
+		return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue)
+	default:
+		return false
+	}
+}
+
+// Key returns requirement key
+func (r *Requirement) Key() string {
+	return r.key
+}
+
+// Operator returns requirement operator
+func (r *Requirement) Operator() selection.Operator {
+	return r.operator
+}
+
+// Values returns requirement values
+func (r *Requirement) Values() sets.String {
+	ret := sets.String{}
+	for i := range r.strValues {
+		ret.Insert(r.strValues[i])
+	}
+	return ret
+}
+
+// Empty returns true if the internalSelector doesn't restrict selection space
+func (lsel internalSelector) Empty() bool {
+	if lsel == nil {
+		return true
+	}
+	return len(lsel) == 0
+}
+
+// String returns a human-readable string that represents this
+// Requirement. If called on an invalid Requirement, an error is
+// returned. See NewRequirement for creating a valid Requirement.
+func (r *Requirement) String() string {
+	var buffer bytes.Buffer
+	if r.operator == selection.DoesNotExist {
+		buffer.WriteString("!")
+	}
+	buffer.WriteString(r.key)
+
+	switch r.operator {
+	case selection.Equals:
+		buffer.WriteString("=")
+	case selection.DoubleEquals:
+		buffer.WriteString("==")
+	case selection.NotEquals:
+		buffer.WriteString("!=")
+	case selection.In:
+		buffer.WriteString(" in ")
+	case selection.NotIn:
+		buffer.WriteString(" notin ")
+	case selection.GreaterThan:
+		buffer.WriteString(">")
+	case selection.LessThan:
+		buffer.WriteString("<")
+	case selection.Exists, selection.DoesNotExist:
+		return buffer.String()
+	}
+
+	switch r.operator {
+	case selection.In, selection.NotIn:
+		buffer.WriteString("(")
+	}
+	if len(r.strValues) == 1 {
+		buffer.WriteString(r.strValues[0])
+	} else { // only > 1 since == 0 prohibited by NewRequirement
+		// normalizes value order on output, without mutating the in-memory selector representation
+		// also avoids normalization when it is not required, and ensures we do not mutate shared data
+		buffer.WriteString(strings.Join(safeSort(r.strValues), ","))
+	}
+
+	switch r.operator {
+	case selection.In, selection.NotIn:
+		buffer.WriteString(")")
+	}
+	return buffer.String()
+}
+
+// safeSort sorts the input strings without modifying them
+func safeSort(in []string) []string {
+	if sort.StringsAreSorted(in) {
+		return in
+	}
+	out := make([]string, len(in))
+	copy(out, in)
+	sort.Strings(out)
+	return out
+}
+
+// Add adds requirements to the selector. It copies the current selector and returns a new one
+func (lsel internalSelector) Add(reqs ...Requirement) Selector {
+	var sel internalSelector
+	for ix := range lsel {
+		sel = append(sel, lsel[ix])
+	}
+	for _, r := range reqs {
+		sel = append(sel, r)
+	}
+	sort.Sort(ByKey(sel))
+	return sel
+}
+
+// Matches for an internalSelector returns true if all
+// its Requirements match the input Labels. If any
+// Requirement does not match, false is returned.
+func (lsel internalSelector) Matches(l Labels) bool {
+	for ix := range lsel {
+		if matches := lsel[ix].Matches(l); !matches {
+			return false
+		}
+	}
+	return true
+}
+
+func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true }
+
+// String returns a comma-separated string of all
+// the internalSelector Requirements' human-readable strings.
+func (lsel internalSelector) String() string {
+	var reqs []string
+	for ix := range lsel {
+		reqs = append(reqs, lsel[ix].String())
+	}
+	return strings.Join(reqs, ",")
+}
+
+// RequiresExactMatch introspects whether a given selector requires a single specific label
+// to be set, and if so returns the value it requires.
+func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) {
+	for ix := range lsel {
+		if lsel[ix].key == label {
+			switch lsel[ix].operator {
+			case selection.Equals, selection.DoubleEquals, selection.In:
+				if len(lsel[ix].strValues) == 1 {
+					return lsel[ix].strValues[0], true
+				}
+			}
+			return "", false
+		}
+	}
+	return "", false
+}
+
+// Token represents constant definition for lexer token
+type Token int
+
+const (
+	// ErrorToken represents scan error
+	ErrorToken Token = iota
+	// EndOfStringToken represents end of string
+	EndOfStringToken
+	// ClosedParToken represents close parenthesis
+	ClosedParToken
+	// CommaToken represents the comma
+	CommaToken
+	// DoesNotExistToken represents logic not
+	DoesNotExistToken
+	// DoubleEqualsToken represents double equals
+	DoubleEqualsToken
+	// EqualsToken represents equal
+	EqualsToken
+	// GreaterThanToken represents greater than
+	GreaterThanToken
+	// IdentifierToken represents identifier, e.g. keys and values
+	IdentifierToken
+	// InToken represents in
+	InToken
+	// LessThanToken represents less than
+	LessThanToken
+	// NotEqualsToken represents not equal
+	NotEqualsToken
+	// NotInToken represents not in
+	NotInToken
+	// OpenParToken represents open parenthesis
+	OpenParToken
+)
+
+// string2token contains the mapping between lexer Token and token literal
+// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense)
+var string2token = map[string]Token{
+	")":     ClosedParToken,
+	",":     CommaToken,
+	"!":     DoesNotExistToken,
+	"==":    DoubleEqualsToken,
+	"=":     EqualsToken,
+	">":     GreaterThanToken,
+	"in":    InToken,
+	"<":     LessThanToken,
+	"!=":    NotEqualsToken,
+	"notin": NotInToken,
+	"(":     OpenParToken,
+}
+
+// ScannedItem contains the Token and the literal produced by the lexer.
+type ScannedItem struct {
+	tok     Token
+	literal string
+}
+
+// isWhitespace returns true if the character is a space, tab, carriage return, or newline.
+func isWhitespace(ch byte) bool {
+	return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n'
+}
+
+// isSpecialSymbol detects whether the character ch can be an operator
+func isSpecialSymbol(ch byte) bool {
+	switch ch {
+	case '=', '!', '(', ')', ',', '>', '<':
+		return true
+	}
+	return false
+}
+
+// Lexer represents the Lexer struct for label selector.
+// It contains the necessary information to tokenize the input string
+type Lexer struct {
+	// s stores the string to be tokenized
+	s string
+	// pos is the position currently tokenized
+	pos int
+}
+
+// read returns the character currently lexed,
+// increments the position, and checks for buffer overflow
+func (l *Lexer) read() (b byte) {
+	b = 0
+	if l.pos < len(l.s) {
+		b = l.s[l.pos]
+		l.pos++
+	}
+	return b
+}
+
+// unread 'undoes' the last read character
+func (l *Lexer) unread() {
+	l.pos--
+}
+
+// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier.
+func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) {
+	var buffer []byte
+IdentifierLoop:
+	for {
+		switch ch := l.read(); {
+		case ch == 0:
+			break IdentifierLoop
+		case isSpecialSymbol(ch) || isWhitespace(ch):
+			l.unread()
+			break IdentifierLoop
+		default:
+			buffer = append(buffer, ch)
+		}
+	}
+	s := string(buffer)
+	if val, ok := string2token[s]; ok { // is a literal token?
+		return val, s
+	}
+	return IdentifierToken, s // otherwise is an identifier
+}
+
+// scanSpecialSymbol scans a string starting with a special symbol.
+// Special symbols identify non-literal operators such as "!=", "==", "="
+func (l *Lexer) scanSpecialSymbol() (Token, string) {
+	lastScannedItem := ScannedItem{}
+	var buffer []byte
+SpecialSymbolLoop:
+	for {
+		switch ch := l.read(); {
+		case ch == 0:
+			break SpecialSymbolLoop
+		case isSpecialSymbol(ch):
+			buffer = append(buffer, ch)
+			if token, ok := string2token[string(buffer)]; ok {
+				lastScannedItem = ScannedItem{tok: token, literal: string(buffer)}
+			} else if lastScannedItem.tok != 0 {
+				l.unread()
+				break SpecialSymbolLoop
+			}
+		default:
+			l.unread()
+			break SpecialSymbolLoop
+		}
+	}
+	if lastScannedItem.tok == 0 {
+		return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer)
+	}
+	return lastScannedItem.tok, lastScannedItem.literal
+}
+
+// skipWhiteSpaces consumes all blank characters
+// returning the first non blank character
+func (l *Lexer) skipWhiteSpaces(ch byte) byte {
+	for {
+		if !isWhitespace(ch) {
+			return ch
+		}
+		ch = l.read()
+	}
+}
+
+// Lex returns a pair of Token and the literal
+// The literal is meaningful only for IdentifierToken tokens
+func (l *Lexer) Lex() (tok Token, lit string) {
+	switch ch := l.skipWhiteSpaces(l.read()); {
+	case ch == 0:
+		return EndOfStringToken, ""
+	case isSpecialSymbol(ch):
+		l.unread()
+		return l.scanSpecialSymbol()
+	default:
+		l.unread()
+		return l.scanIDOrKeyword()
+	}
+}
+
+// Parser data structure contains the label selector parser data structure
+type Parser struct {
+	l            *Lexer
+	scannedItems []ScannedItem
+	position     int
+}
+
+// ParserContext represents context during parsing:
+// some literals, for example 'in' and 'notin', can be
+// recognized as operators (for example 'x in (a)') or
+// as values (for example 'value in (in)')
+type ParserContext int
+
+const (
+	// KeyAndOperator represents key and operator
+	KeyAndOperator ParserContext = iota
+	// Values represents values
+	Values
+)
+
+// lookahead returns the current token and literal without incrementing the current position
+func (p *Parser) lookahead(context ParserContext) (Token, string) {
+	tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal
+	if context == Values {
+		switch tok {
+		case InToken, NotInToken:
+			tok = IdentifierToken
+		}
+	}
+	return tok, lit
+}
+
+// consume returns the current token and literal, and increments the position
+func (p *Parser) consume(context ParserContext) (Token, string) {
+	p.position++
+	tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal
+	if context == Values {
+		switch tok {
+		case InToken, NotInToken:
+			tok = IdentifierToken
+		}
+	}
+	return tok, lit
+}
+
+// scan runs through the input string and stores the ScannedItem in an array
+// Parser can now lookahead and consume the tokens
+func (p *Parser) scan() {
+	for {
+		token, literal := p.l.Lex()
+		p.scannedItems = append(p.scannedItems, ScannedItem{token, literal})
+		if token == EndOfStringToken {
+			break
+		}
+	}
+}
+
+// parse runs the recursive-descent parsing algorithm
+// on the input string. It returns a list of Requirement objects.
+func (p *Parser) parse() (internalSelector, error) {
+	p.scan() // init scannedItems
+
+	var requirements internalSelector
+	for {
+		tok, lit := p.lookahead(Values)
+		switch tok {
+		case IdentifierToken, DoesNotExistToken:
+			r, err := p.parseRequirement()
+			if err != nil {
+				return nil, fmt.Errorf("unable to parse requirement: %v", err)
+			}
+			requirements = append(requirements, *r)
+			t, l := p.consume(Values)
+			switch t {
+			case EndOfStringToken:
+				return requirements, nil
+			case CommaToken:
+				t2, l2 := p.lookahead(Values)
+				if t2 != IdentifierToken && t2 != DoesNotExistToken {
+					return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2)
+				}
+			default:
+				return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l)
+			}
+		case EndOfStringToken:
+			return requirements, nil
+		default:
+			return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit)
+		}
+	}
+}
+
+func (p *Parser) parseRequirement() (*Requirement, error) {
+	key, operator, err := p.parseKeyAndInferOperator()
+	if err != nil {
+		return nil, err
+	}
+	if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked
+		return NewRequirement(key, operator, []string{})
+	}
+	operator, err = p.parseOperator()
+	if err != nil {
+		return nil, err
+	}
+	var values sets.String
+	switch operator {
+	case selection.In, selection.NotIn:
+		values, err = p.parseValues()
+	case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan:
+		values, err = p.parseExactValue()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return NewRequirement(key, operator, values.List())
+
+}
+
+// parseKeyAndInferOperator parses literals.
+// If no operator ('!', 'in', 'notin', '==', '=', '!=') is found,
+// the 'exists' operator is inferred
+func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) {
+	var operator selection.Operator
+	tok, literal := p.consume(Values)
+	if tok == DoesNotExistToken {
+		operator = selection.DoesNotExist
+		tok, literal = p.consume(Values)
+	}
+	if tok != IdentifierToken {
+		err := fmt.Errorf("found '%s', expected: identifier", literal)
+		return "", "", err
+	}
+	if err := validateLabelKey(literal); err != nil {
+		return "", "", err
+	}
+	if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken {
+		if operator != selection.DoesNotExist {
+			operator = selection.Exists
+		}
+	}
+	return literal, operator, nil
+}
+
+// parseOperator returns the operator and eventually the matchType.
+// The matchType can be exact
+func (p *Parser) parseOperator() (op selection.Operator, err error) {
+	tok, lit := p.consume(KeyAndOperator)
+	switch tok {
+	// DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator
+	case InToken:
+		op = selection.In
+	case EqualsToken:
+		op = selection.Equals
+	case DoubleEqualsToken:
+		op = selection.DoubleEquals
+	case GreaterThanToken:
+		op = selection.GreaterThan
+	case LessThanToken:
+		op = selection.LessThan
+	case NotInToken:
+		op = selection.NotIn
+	case NotEqualsToken:
+		op = selection.NotEquals
+	default:
+		return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', 'notin'", lit)
+	}
+	return op, nil
+}
+
+// parseValues parses the values for set based matching (x,y,z)
+func (p *Parser) parseValues() (sets.String, error) {
+	tok, lit := p.consume(Values)
+	if tok != OpenParToken {
+		return nil, fmt.Errorf("found '%s' expected: '('", lit)
+	}
+	tok, lit = p.lookahead(Values)
+	switch tok {
+	case IdentifierToken, CommaToken:
+		s, err := p.parseIdentifiersList() // handles general cases
+		if err != nil {
+			return s, err
+		}
+		if tok, _ = p.consume(Values); tok != ClosedParToken {
+			return nil, fmt.Errorf("found '%s', expected: ')'", lit)
+		}
+		return s, nil
+	case ClosedParToken: // handles "()"
+		p.consume(Values)
+		return sets.NewString(""), nil
+	default:
+		return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
+	}
+}
+
+// parseIdentifiersList parses a (possibly empty) list
+// of comma-separated (possibly empty) identifiers
+func (p *Parser) parseIdentifiersList() (sets.String, error) {
+	s := sets.NewString()
+	for {
+		tok, lit := p.consume(Values)
+		switch tok {
+		case IdentifierToken:
+			s.Insert(lit)
+			tok2, lit2 := p.lookahead(Values)
+			switch tok2 {
+			case CommaToken:
+				continue
+			case ClosedParToken:
+				return s, nil
+			default:
+				return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
+			}
+		case CommaToken: // handled here since we can have "(,"
+			if s.Len() == 0 {
+				s.Insert("") // to handle (,
+			}
+			tok2, _ := p.lookahead(Values)
+			if tok2 == ClosedParToken {
+				s.Insert("") // to handle ,)  Double "" removed by StringSet
+				return s, nil
+			}
+			if tok2 == CommaToken {
+				p.consume(Values)
+				s.Insert("") // to handle ,, Double "" removed by StringSet
+			}
+		default: // it can be operator
+			return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
+		}
+	}
+}
+
+// parseExactValue parses the only value for exact match style
+func (p *Parser) parseExactValue() (sets.String, error) {
+	s := sets.NewString()
+	tok, lit := p.lookahead(Values)
+	if tok == EndOfStringToken || tok == CommaToken {
+		s.Insert("")
+		return s, nil
+	}
+	tok, lit = p.consume(Values)
+	if tok == IdentifierToken {
+		s.Insert(lit)
+		return s, nil
+	}
+	return nil, fmt.Errorf("found '%s', expected: identifier", lit)
+}
+
+// Parse takes a string representing a selector and returns a selector
+// object, or an error. This parsing function differs from ParseSelector
+// as they parse different selectors with different syntaxes.
+// The input will cause an error if it does not follow this form:
+//
+//  <selector-syntax>         ::= <requirement> | <requirement> "," <selector-syntax>
+//  <requirement>             ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
+//  <set-based-restriction>   ::= "" | <inclusion-exclusion> <value-set>
+//  <inclusion-exclusion>     ::= <inclusion> | <exclusion>
+//  <exclusion>               ::= "notin"
+//  <inclusion>               ::= "in"
+//  <value-set>               ::= "(" <values> ")"
+//  <values>                  ::= VALUE | VALUE "," <values>
+//  <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
+//
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t')
+// Example of valid syntax:
+//  "x in (foo,,baz),y,z notin ()"
+//
+// Note:
+//  (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
+//      VALUEs in its requirement
+//  (2) Exclusion - " notin " - denotes that the KEY is not equal to any
+//      of the VALUEs in its requirement or does not exist
+//  (3) The empty string is a valid VALUE
+//  (4) A requirement with just a KEY - as in "y" above - denotes that
+//      the KEY exists and can be any VALUE.
+//  (5) A requirement with just !KEY requires that the KEY not exist.
+//
+func Parse(selector string) (Selector, error) {
+	parsedSelector, err := parse(selector)
+	if err == nil {
+		return parsedSelector, nil
+	}
+	return nil, err
+}
+
+// parse parses the string representation of the selector and returns the internalSelector struct.
+// The callers of this method can then decide how to return the internalSelector struct to their
+// callers. This function has two callers now, one returns a Selector interface and the other
+// returns a list of requirements.
+func parse(selector string) (internalSelector, error) {
+	p := &Parser{l: &Lexer{s: selector, pos: 0}}
+	items, err := p.parse()
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(ByKey(items)) // sort to guarantee deterministic parsing
+	return internalSelector(items), err
+}
+
+func validateLabelKey(k string) error {
+	if errs := validation.IsQualifiedName(k); len(errs) != 0 {
+		return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; "))
+	}
+	return nil
+}
+
+func validateLabelValue(k, v string) error {
+	if errs := validation.IsValidLabelValue(v); len(errs) != 0 {
+		return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; "))
+	}
+	return nil
+}
+
+// SelectorFromSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+// It does not perform any validation, which means the server will reject
+// the request if the Set contains invalid values.
+func SelectorFromSet(ls Set) Selector {
+	return SelectorFromValidatedSet(ls)
+}
+
+// ValidatedSelectorFromSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+// The Set is validated client-side, which allows errors to be caught early.
+func ValidatedSelectorFromSet(ls Set) (Selector, error) {
+	if ls == nil || len(ls) == 0 {
+		return internalSelector{}, nil
+	}
+	requirements := make([]Requirement, 0, len(ls))
+	for label, value := range ls {
+		r, err := NewRequirement(label, selection.Equals, []string{value})
+		if err != nil {
+			return nil, err
+		}
+		requirements = append(requirements, *r)
+	}
+	// sort to have deterministic string representation
+	sort.Sort(ByKey(requirements))
+	return internalSelector(requirements), nil
+}
+
+// SelectorFromValidatedSet returns a Selector which will match exactly the given Set.
+// Nil and empty Sets are considered equivalent to Everything().
+// It assumes that Set is already validated and doesn't do any validation.
+func SelectorFromValidatedSet(ls Set) Selector {
+	if ls == nil || len(ls) == 0 {
+		return internalSelector{}
+	}
+	requirements := make([]Requirement, 0, len(ls))
+	for label, value := range ls {
+		requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}})
+	}
+	// sort to have deterministic string representation
+	sort.Sort(ByKey(requirements))
+	return internalSelector(requirements)
+}
+
+// ParseToRequirements takes a string representing a selector and returns a list of
+// requirements. This function is suitable for those callers that perform additional
+// processing on selector requirements.
+// See the documentation for Parse() function for more details.
+// TODO: Consider exporting the internalSelector type instead.
+func ParseToRequirements(selector string) ([]Requirement, error) {
+	return parse(selector)
+}
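
Finally, a sketch (not part of the diff) of the set-based label selector parser and the Requirement API above; the label names and values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Parse understands the set-based grammar documented above.
	sel, err := labels.Parse("app in (bbsim, bbsim-sadis-server), release != legacy")
	if err != nil {
		panic(err)
	}

	// Requirements can also be built programmatically and added.
	req, err := labels.NewRequirement("tier", selection.Exists, nil)
	if err != nil {
		panic(err)
	}
	sel = sel.Add(*req)

	pod := labels.Set{"app": "bbsim-sadis-server", "release": "voltha", "tier": "backend"}
	fmt.Println(sel.Matches(pod)) // true
	fmt.Println(sel.String())     // app in (bbsim,bbsim-sadis-server),release!=legacy,tier
}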
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
new file mode 100644
index 0000000..4d48294
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
@@ -0,0 +1,42 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package labels
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Requirement) DeepCopyInto(out *Requirement) {
+	*out = *in
+	if in.strValues != nil {
+		in, out := &in.strValues, &out.strValues
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement.
+func (in *Requirement) DeepCopy() *Requirement {
+	if in == nil {
+		return nil
+	}
+	out := new(Requirement)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
new file mode 100644
index 0000000..a928631
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
@@ -0,0 +1,396 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/conversion/queryparams"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+)
+
+// codec binds an encoder and decoder.
+type codec struct {
+	Encoder
+	Decoder
+}
+
+// NewCodec creates a Codec from an Encoder and Decoder.
+func NewCodec(e Encoder, d Decoder) Codec {
+	return codec{e, d}
+}
+
+// Encode is a convenience wrapper for encoding to a []byte from an Encoder
+func Encode(e Encoder, obj Object) ([]byte, error) {
+	// TODO: reuse buffer
+	buf := &bytes.Buffer{}
+	if err := e.Encode(obj, buf); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// Decode is a convenience wrapper for decoding data into an Object.
+func Decode(d Decoder, data []byte) (Object, error) {
+	obj, _, err := d.Decode(data, nil, nil)
+	return obj, err
+}
+
+// DecodeInto performs a Decode into the provided object.
+func DecodeInto(d Decoder, data []byte, into Object) error {
+	out, gvk, err := d.Decode(data, nil, into)
+	if err != nil {
+		return err
+	}
+	if out != into {
+		return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into))
+	}
+	return nil
+}
+
+// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests.
+func EncodeOrDie(e Encoder, obj Object) string {
+	bytes, err := Encode(e, obj)
+	if err != nil {
+		panic(err)
+	}
+	return string(bytes)
+}
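
Illustrative sketch of how the Encode/DecodeInto helpers above compose; it assumes the caller already has a configured runtime.Codec (for example from a serializer factory), and the package name is made up.

```go
package codecutil

import "k8s.io/apimachinery/pkg/runtime"

// roundTrip encodes in with c and decodes the bytes back into out,
// returning any error from either step.
func roundTrip(c runtime.Codec, in, out runtime.Object) error {
	data, err := runtime.Encode(c, in)
	if err != nil {
		return err
	}
	return runtime.DecodeInto(c, data, out)
}
```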
+
+// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or
+// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object.
+func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) {
+	if obj != nil {
+		kinds, _, err := t.ObjectKinds(obj)
+		if err != nil {
+			return nil, err
+		}
+		for _, kind := range kinds {
+			if gvk == kind {
+				return obj, nil
+			}
+		}
+	}
+	return c.New(gvk)
+}
+
+// NoopEncoder converts a Decoder to a Serializer or Codec for code that expects them but only uses decoding.
+type NoopEncoder struct {
+	Decoder
+}
+
+var _ Serializer = NoopEncoder{}
+
+const noopEncoderIdentifier Identifier = "noop"
+
+func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
+	// There is no need to handle runtime.CacheableObject, as we don't
+	// process the obj at all.
+	return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
+}
+
+// Identifier implements runtime.Encoder interface.
+func (n NoopEncoder) Identifier() Identifier {
+	return noopEncoderIdentifier
+}
+
+// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
+type NoopDecoder struct {
+	Encoder
+}
+
+var _ Serializer = NoopDecoder{}
+
+func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+	return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder))
+}
+
+// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.
+func NewParameterCodec(scheme *Scheme) ParameterCodec {
+	return &parameterCodec{
+		typer:     scheme,
+		convertor: scheme,
+		creator:   scheme,
+		defaulter: scheme,
+	}
+}
+
+// parameterCodec implements conversion to and from query parameters and objects.
+type parameterCodec struct {
+	typer     ObjectTyper
+	convertor ObjectConvertor
+	creator   ObjectCreater
+	defaulter ObjectDefaulter
+}
+
+var _ ParameterCodec = &parameterCodec{}
+
+// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then
+// converts that object to into (if necessary). Returns an error if the operation cannot be completed.
+func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error {
+	if len(parameters) == 0 {
+		return nil
+	}
+	targetGVKs, _, err := c.typer.ObjectKinds(into)
+	if err != nil {
+		return err
+	}
+	for i := range targetGVKs {
+		if targetGVKs[i].GroupVersion() == from {
+			if err := c.convertor.Convert(&parameters, into, nil); err != nil {
+				return err
+			}
+			// in the case where we're going into the same object we're receiving, default on the outbound object
+			if c.defaulter != nil {
+				c.defaulter.Default(into)
+			}
+			return nil
+		}
+	}
+
+	input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind))
+	if err != nil {
+		return err
+	}
+	if err := c.convertor.Convert(&parameters, input, nil); err != nil {
+		return err
+	}
+	// if we have defaulter, default the input before converting to output
+	if c.defaulter != nil {
+		c.defaulter.Default(input)
+	}
+	return c.convertor.Convert(input, into, nil)
+}
+
+// EncodeParameters converts the provided object into the to version, then converts that object to url.Values.
+// Returns an error if conversion is not possible.
+func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) {
+	gvks, _, err := c.typer.ObjectKinds(obj)
+	if err != nil {
+		return nil, err
+	}
+	gvk := gvks[0]
+	if to != gvk.GroupVersion() {
+		out, err := c.convertor.ConvertToVersion(obj, to)
+		if err != nil {
+			return nil, err
+		}
+		obj = out
+	}
+	return queryparams.Convert(obj)
+}
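
Illustrative sketch of the ParameterCodec above; it assumes sch is a *runtime.Scheme that already has the target group/version and its query-parameter conversions registered (function and package names are made up).

```go
package codecutil

import (
	"net/url"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// toQuery converts an options object into url.Values for the given group/version.
func toQuery(sch *runtime.Scheme, opts runtime.Object, gv schema.GroupVersion) (url.Values, error) {
	pc := runtime.NewParameterCodec(sch)
	return pc.EncodeParameters(opts, gv)
}
```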
+
+type base64Serializer struct {
+	Encoder
+	Decoder
+
+	identifier Identifier
+}
+
+func NewBase64Serializer(e Encoder, d Decoder) Serializer {
+	return &base64Serializer{
+		Encoder:    e,
+		Decoder:    d,
+		identifier: identifier(e),
+	}
+}
+
+func identifier(e Encoder) Identifier {
+	result := map[string]string{
+		"name": "base64",
+	}
+	if e != nil {
+		result["encoder"] = string(e.Identifier())
+	}
+	identifier, err := json.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err)
+	}
+	return Identifier(identifier)
+}
+
+func (s base64Serializer) Encode(obj Object, stream io.Writer) error {
+	if co, ok := obj.(CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, stream)
+	}
+	return s.doEncode(obj, stream)
+}
+
+func (s base64Serializer) doEncode(obj Object, stream io.Writer) error {
+	e := base64.NewEncoder(base64.StdEncoding, stream)
+	err := s.Encoder.Encode(obj, e)
+	e.Close()
+	return err
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s base64Serializer) Identifier() Identifier {
+	return s.identifier
+}
+
+func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+	out := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
+	n, err := base64.StdEncoding.Decode(out, data)
+	if err != nil {
+		return nil, nil, err
+	}
+	return s.Decoder.Decode(out[:n], defaults, into)
+}
+
+// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot
+// include media-type parameters), or the first info with an empty media type, or false if no type matches.
+func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) {
+	for _, info := range types {
+		if info.MediaType == mediaType {
+			return info, true
+		}
+	}
+	for _, info := range types {
+		if len(info.MediaType) == 0 {
+			return info, true
+		}
+	}
+	return SerializerInfo{}, false
+}
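
Illustrative sketch of SerializerInfoForMediaType used for content negotiation; the helper and package names are made up.

```go
package codecutil

import "k8s.io/apimachinery/pkg/runtime"

// jsonInfoOrFirst prefers the JSON serializer info from a negotiated list and
// falls back to the first entry when no media type matches.
func jsonInfoOrFirst(infos []runtime.SerializerInfo) (runtime.SerializerInfo, bool) {
	if info, ok := runtime.SerializerInfoForMediaType(infos, "application/json"); ok {
		return info, true
	}
	if len(infos) > 0 {
		return infos[0], true
	}
	return runtime.SerializerInfo{}, false
}
```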
+
+var (
+	// InternalGroupVersioner will always prefer the internal version for a given group version kind.
+	InternalGroupVersioner GroupVersioner = internalGroupVersioner{}
+	// DisabledGroupVersioner will reject all kinds passed to it.
+	DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{}
+)
+
+const (
+	internalGroupVersionerIdentifier = "internal"
+	disabledGroupVersionerIdentifier = "disabled"
+)
+
+type internalGroupVersioner struct{}
+
+// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version.
+func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	for _, kind := range kinds {
+		if kind.Version == APIVersionInternal {
+			return kind, true
+		}
+	}
+	for _, kind := range kinds {
+		return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true
+	}
+	return schema.GroupVersionKind{}, false
+}
+
+// Identifier implements GroupVersioner interface.
+func (internalGroupVersioner) Identifier() string {
+	return internalGroupVersionerIdentifier
+}
+
+type disabledGroupVersioner struct{}
+
+// KindForGroupVersionKinds returns false for any input.
+func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	return schema.GroupVersionKind{}, false
+}
+
+// Identifier implements GroupVersioner interface.
+func (disabledGroupVersioner) Identifier() string {
+	return disabledGroupVersionerIdentifier
+}
+
+// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner
+var _ GroupVersioner = schema.GroupVersion{}
+var _ GroupVersioner = schema.GroupVersions{}
+var _ GroupVersioner = multiGroupVersioner{}
+
+type multiGroupVersioner struct {
+	target             schema.GroupVersion
+	acceptedGroupKinds []schema.GroupKind
+	coerce             bool
+}
+
+// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.
+// Kind may be empty in the provided group kind, in which case any kind will match.
+func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
+	if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) {
+		return gv
+	}
+	return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}
+}
+
+// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind.
+// Incoming kinds that match the provided groupKinds are preferred.
+// Kind may be empty in the provided group kind, in which case any kind will match.
+// Examples:
+//   gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar
+//   KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind)
+//
+//   gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+//   KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group)
+//
+//   gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+//   KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list)
+func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
+	return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true}
+}
+
+// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will
+// use the originating kind where possible.
+func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
+	for _, src := range kinds {
+		for _, kind := range v.acceptedGroupKinds {
+			if kind.Group != src.Group {
+				continue
+			}
+			if len(kind.Kind) > 0 && kind.Kind != src.Kind {
+				continue
+			}
+			return v.target.WithKind(src.Kind), true
+		}
+	}
+	if v.coerce && len(kinds) > 0 {
+		return v.target.WithKind(kinds[0].Kind), true
+	}
+	return schema.GroupVersionKind{}, false
+}
+
+// Identifier implements GroupVersioner interface.
+func (v multiGroupVersioner) Identifier() string {
+	groupKinds := make([]string, 0, len(v.acceptedGroupKinds))
+	for _, gk := range v.acceptedGroupKinds {
+		groupKinds = append(groupKinds, gk.String())
+	}
+	result := map[string]string{
+		"name":     "multi",
+		"target":   v.target.String(),
+		"accepted": strings.Join(groupKinds, ","),
+		"coerce":   strconv.FormatBool(v.coerce),
+	}
+	identifier, err := json.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err)
+	}
+	return string(identifier)
+}
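
Illustrative sketch of the coercing multi-group versioner above; the group names mirror the examples in the comment and are made up.

```go
package codecutil

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// internalVersioner maps kinds from either of two (made-up) groups onto the
// internal version of "mygroup", coercing kinds from unknown groups as well.
func internalVersioner() runtime.GroupVersioner {
	target := schema.GroupVersion{Group: "mygroup", Version: runtime.APIVersionInternal}
	return runtime.NewCoercingMultiGroupVersioner(
		target,
		schema.GroupKind{Group: "mygroup"},
		schema.GroupKind{Group: "anothergroup"},
	)
}
```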
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
new file mode 100644
index 0000000..0002280
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/json"
+)
+
+// CheckCodec makes sure that the codec can encode objects like internalType,
+// decode all of the external types listed, and also decode them into the given
+// object. (Will modify internalObject.) (Assumes JSON serialization.)
+// TODO: verify that the correct external version is chosen on encode...
+func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error {
+	if _, err := Encode(c, internalType); err != nil {
+		return fmt.Errorf("Internal type not encodable: %v", err)
+	}
+	for _, et := range externalTypes {
+		typeMeta := TypeMeta{
+			Kind:       et.Kind,
+			APIVersion: et.GroupVersion().String(),
+		}
+		exBytes, err := json.Marshal(&typeMeta)
+		if err != nil {
+			return err
+		}
+		obj, err := Decode(c, exBytes)
+		if err != nil {
+			return fmt.Errorf("external type %s not interpretable: %v", et, err)
+		}
+		if reflect.TypeOf(obj) != reflect.TypeOf(internalType) {
+			return fmt.Errorf("decode of external type %s produced: %#v", et, obj)
+		}
+		if err = DecodeInto(c, exBytes, internalType); err != nil {
+			return fmt.Errorf("external type %s not convertible to internal type: %v", et, err)
+		}
+	}
+	return nil
+}
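
Illustrative sketch of CheckCodec used as a start-up sanity check; all names are made up.

```go
package codecutil

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// mustCheckCodec panics at start-up if codec c cannot encode the internal type
// and decode each of the listed external kinds back into it.
func mustCheckCodec(c runtime.Codec, internal runtime.Object, external ...schema.GroupVersionKind) {
	if err := runtime.CheckCodec(c, internal, external...); err != nil {
		panic(err)
	}
}
```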
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
new file mode 100644
index 0000000..d04d701
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package runtime defines conversions between generic types and structs to map query strings
+// to struct objects.
+package runtime
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/conversion"
+)
+
+// DefaultMetaV1FieldSelectorConversion auto-accepts metav1 values for name and namespace.
+// For a cluster-scoped resource, specifying an empty namespace works fine, and specifying a
+// particular namespace will return no results, as expected.
+func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) {
+	switch label {
+	case "metadata.name":
+		return label, value, nil
+	case "metadata.namespace":
+		return label, value, nil
+	default:
+		return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q", label, "metadata.name", "metadata.namespace")
+	}
+}
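
Illustrative sketch of wiring DefaultMetaV1FieldSelectorConversion into a scheme; it assumes Scheme.AddFieldLabelConversionFunc (defined elsewhere in this package, not shown in this diff) and uses a made-up GroupVersionKind.

```go
package codecutil

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// registerFieldSelectors wires the default metav1 field-selector conversion for a
// hypothetical kind, so only metadata.name and metadata.namespace are accepted.
func registerFieldSelectors(s *runtime.Scheme) error {
	gvk := schema.GroupVersionKind{Group: "example.org", Version: "v1", Kind: "Subscriber"}
	return s.AddFieldLabelConversionFunc(gvk, runtime.DefaultMetaV1FieldSelectorConversion)
}
```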
+
+// JSONKeyMapper uses the struct tags on a conversion to determine the key value for
+// the other side. Use when mapping from a map[string]* to a struct or vice versa.
+func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) {
+	if s := destTag.Get("json"); len(s) > 0 {
+		return strings.SplitN(s, ",", 2)[0], key
+	}
+	if s := sourceTag.Get("json"); len(s) > 0 {
+		return key, strings.SplitN(s, ",", 2)[0]
+	}
+	return key, key
+}
+
+func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error {
+	if len(*in) == 0 {
+		*out = ""
+		return nil
+	}
+	*out = (*in)[0]
+	return nil
+}
+
+func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error {
+	if len(*in) == 0 {
+		*out = 0
+		return nil
+	}
+	str := (*in)[0]
+	i, err := strconv.Atoi(str)
+	if err != nil {
+		return err
+	}
+	*out = i
+	return nil
+}
+
+// Convert_Slice_string_To_bool will convert a string parameter to boolean.
+// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
+// value of "0" resolve to false.
+// Any other value (including empty string) resolves to true.
+func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error {
+	if len(*in) == 0 {
+		*out = false
+		return nil
+	}
+	switch {
+	case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
+		*out = false
+	default:
+		*out = true
+	}
+	return nil
+}
+
+// Convert_Slice_string_To_Pointer_bool will convert a string parameter to a pointer to boolean.
+// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
+// value of "0" resolve to false.
+// Any other value (including empty string) resolves to true.
+func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error {
+	if len(*in) == 0 {
+		boolVar := false
+		*out = &boolVar
+		return nil
+	}
+	switch {
+	case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
+		boolVar := false
+		*out = &boolVar
+	default:
+		boolVar := true
+		*out = &boolVar
+	}
+	return nil
+}
+
+func string_to_int64(in string) (int64, error) {
+	return strconv.ParseInt(in, 10, 64)
+}
+
+func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error {
+	if in == nil {
+		*out = 0
+		return nil
+	}
+	i, err := string_to_int64(*in)
+	if err != nil {
+		return err
+	}
+	*out = i
+	return nil
+}
+
+func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error {
+	if len(*in) == 0 {
+		*out = 0
+		return nil
+	}
+	i, err := string_to_int64((*in)[0])
+	if err != nil {
+		return err
+	}
+	*out = i
+	return nil
+}
+
+func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error {
+	if in == nil {
+		*out = nil
+		return nil
+	}
+	i, err := string_to_int64(*in)
+	if err != nil {
+		return err
+	}
+	*out = &i
+	return nil
+}
+
+func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error {
+	if len(*in) == 0 {
+		*out = nil
+		return nil
+	}
+	i, err := string_to_int64((*in)[0])
+	if err != nil {
+		return err
+	}
+	*out = &i
+	return nil
+}
+
+func RegisterStringConversions(s *Scheme) error {
+	if err := s.AddConversionFunc((*[]string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_string(a.(*[]string), b.(*string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_int(a.(*[]string), b.(*int), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_bool(a.(*[]string), b.(*bool), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_int64(a.(*[]string), b.(*int64), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
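
Illustrative sketch of the boolean query-parameter conversion above; the converter ignores its conversion.Scope argument, so nil is passed for brevity, and the helper name is made up.

```go
package codecutil

import "k8s.io/apimachinery/pkg/runtime"

// queryFlag applies the truth table implemented above: only a missing value,
// "false", or "0" become false; everything else (including "") is true.
func queryFlag(values []string) bool {
	var b bool
	// The scope parameter is unused by this converter and the call cannot fail.
	_ = runtime.Convert_Slice_string_To_bool(&values, &b, nil)
	return b
}
```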
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
new file mode 100644
index 0000000..871e4c8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
@@ -0,0 +1,707 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	encodingjson "encoding/json"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/util/json"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"sigs.k8s.io/structured-merge-diff/v4/value"
+
+	"k8s.io/klog/v2"
+)
+
+// UnstructuredConverter is an interface for converting between interface{}
+// and map[string]interface representation.
+type UnstructuredConverter interface {
+	ToUnstructured(obj interface{}) (map[string]interface{}, error)
+	FromUnstructured(u map[string]interface{}, obj interface{}) error
+}
+
+type structField struct {
+	structType reflect.Type
+	field      int
+}
+
+type fieldInfo struct {
+	name      string
+	nameValue reflect.Value
+	omitempty bool
+}
+
+type fieldsCacheMap map[structField]*fieldInfo
+
+type fieldsCache struct {
+	sync.Mutex
+	value atomic.Value
+}
+
+func newFieldsCache() *fieldsCache {
+	cache := &fieldsCache{}
+	cache.value.Store(make(fieldsCacheMap))
+	return cache
+}
+
+var (
+	mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
+	stringType             = reflect.TypeOf(string(""))
+	fieldCache             = newFieldsCache()
+
+	// DefaultUnstructuredConverter performs unstructured to Go typed object conversions.
+	DefaultUnstructuredConverter = &unstructuredConverter{
+		mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")),
+		comparison: conversion.EqualitiesOrDie(
+			func(a, b time.Time) bool {
+				return a.UTC() == b.UTC()
+			},
+		),
+	}
+)
+
+func parseBool(key string) bool {
+	if len(key) == 0 {
+		return false
+	}
+	value, err := strconv.ParseBool(key)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key))
+	}
+	return value
+}
+
+// unstructuredConverter knows how to convert between interface{} and
+// Unstructured in both ways.
+type unstructuredConverter struct {
+	// If true, we will additionally run the conversion via JSON
+	// to verify that both paths produce the same result.
+	// This is supposed to be set only in tests.
+	mismatchDetection bool
+	// comparison is the default test logic used to compare
+	comparison conversion.Equalities
+}
+
+// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them
+// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external
+// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection.
+func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter {
+	return &unstructuredConverter{
+		mismatchDetection: true,
+		comparison:        comparison,
+	}
+}
+
+// FromUnstructured converts an object from map[string]interface{} representation into a concrete type.
+// It uses encoding/json/Unmarshaler if object implements it or reflection if not.
+func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error {
+	t := reflect.TypeOf(obj)
+	value := reflect.ValueOf(obj)
+	if t.Kind() != reflect.Ptr || value.IsNil() {
+		return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t)
+	}
+	err := fromUnstructured(reflect.ValueOf(u), value.Elem())
+	if c.mismatchDetection {
+		newObj := reflect.New(t.Elem()).Interface()
+		newErr := fromUnstructuredViaJSON(u, newObj)
+		if (err != nil) != (newErr != nil) {
+			klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err)
+		}
+		if err == nil && !c.comparison.DeepEqual(obj, newObj) {
+			klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
+		}
+	}
+	return err
+}
+
+func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error {
+	data, err := json.Marshal(u)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, obj)
+}
+
+func fromUnstructured(sv, dv reflect.Value) error {
+	sv = unwrapInterface(sv)
+	if !sv.IsValid() {
+		dv.Set(reflect.Zero(dv.Type()))
+		return nil
+	}
+	st, dt := sv.Type(), dv.Type()
+
+	switch dt.Kind() {
+	case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface:
+		// Those require non-trivial conversion.
+	default:
+		// This should handle all simple types.
+		if st.AssignableTo(dt) {
+			dv.Set(sv)
+			return nil
+		}
+		// We cannot simply use "ConvertibleTo", as JSON doesn't support conversions
+		// between these four groups: bools, integers, floats and strings, so we
+		// enforce the same restriction here.
+		if st.ConvertibleTo(dt) {
+			switch st.Kind() {
+			case reflect.String:
+				switch dt.Kind() {
+				case reflect.String:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			case reflect.Bool:
+				switch dt.Kind() {
+				case reflect.Bool:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+				reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+				switch dt.Kind() {
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			case reflect.Float32, reflect.Float64:
+				switch dt.Kind() {
+				case reflect.Float32, reflect.Float64:
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+				if sv.Float() == math.Trunc(sv.Float()) {
+					dv.Set(sv.Convert(dt))
+					return nil
+				}
+			}
+			return fmt.Errorf("cannot convert %s to %s", st.String(), dt.String())
+		}
+	}
+
+	// Check if the object has a custom JSON marshaller/unmarshaller.
+	entry := value.TypeReflectEntryOf(dv.Type())
+	if entry.CanConvertFromUnstructured() {
+		return entry.FromUnstructured(sv, dv)
+	}
+
+	switch dt.Kind() {
+	case reflect.Map:
+		return mapFromUnstructured(sv, dv)
+	case reflect.Slice:
+		return sliceFromUnstructured(sv, dv)
+	case reflect.Ptr:
+		return pointerFromUnstructured(sv, dv)
+	case reflect.Struct:
+		return structFromUnstructured(sv, dv)
+	case reflect.Interface:
+		return interfaceFromUnstructured(sv, dv)
+	default:
+		return fmt.Errorf("unrecognized type: %v", dt.Kind())
+	}
+}
+
+func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
+	fieldCacheMap := fieldCache.value.Load().(fieldsCacheMap)
+	if info, ok := fieldCacheMap[structField{structType, field}]; ok {
+		return info
+	}
+
+	// Cache miss - we need to compute the field name.
+	info := &fieldInfo{}
+	typeField := structType.Field(field)
+	jsonTag := typeField.Tag.Get("json")
+	if len(jsonTag) == 0 {
+		// Make the first character lowercase.
+		if typeField.Name == "" {
+			info.name = typeField.Name
+		} else {
+			info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:]
+		}
+	} else {
+		items := strings.Split(jsonTag, ",")
+		info.name = items[0]
+		for i := range items {
+			if items[i] == "omitempty" {
+				info.omitempty = true
+				break
+			}
+		}
+	}
+	info.nameValue = reflect.ValueOf(info.name)
+
+	fieldCache.Lock()
+	defer fieldCache.Unlock()
+	fieldCacheMap = fieldCache.value.Load().(fieldsCacheMap)
+	newFieldCacheMap := make(fieldsCacheMap)
+	for k, v := range fieldCacheMap {
+		newFieldCacheMap[k] = v
+	}
+	newFieldCacheMap[structField{structType, field}] = info
+	fieldCache.value.Store(newFieldCacheMap)
+	return info
+}
+
+func unwrapInterface(v reflect.Value) reflect.Value {
+	for v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	return v
+}
+
+func mapFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if st.Kind() != reflect.Map {
+		return fmt.Errorf("cannot restore map from %v", st.Kind())
+	}
+
+	if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) {
+		return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key())
+	}
+
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	dv.Set(reflect.MakeMap(dt))
+	for _, key := range sv.MapKeys() {
+		value := reflect.New(dt.Elem()).Elem()
+		if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() {
+			if err := fromUnstructured(val, value); err != nil {
+				return err
+			}
+		} else {
+			value.Set(reflect.Zero(dt.Elem()))
+		}
+		if st.Key().AssignableTo(dt.Key()) {
+			dv.SetMapIndex(key, value)
+		} else {
+			dv.SetMapIndex(key.Convert(dt.Key()), value)
+		}
+	}
+	return nil
+}
+
+func sliceFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 {
+		// We store original []byte representation as string.
+		// This conversion is allowed, but we need to be careful about
+		// marshaling data appropriately.
+		if len(sv.Interface().(string)) > 0 {
+			marshalled, err := json.Marshal(sv.Interface())
+			if err != nil {
+				return fmt.Errorf("error encoding %s to json: %v", st, err)
+			}
+			// TODO: Is this Unmarshal needed?
+			var data []byte
+			err = json.Unmarshal(marshalled, &data)
+			if err != nil {
+				return fmt.Errorf("error decoding from json: %v", err)
+			}
+			dv.SetBytes(data)
+		} else {
+			dv.Set(reflect.Zero(dt))
+		}
+		return nil
+	}
+	if st.Kind() != reflect.Slice {
+		return fmt.Errorf("cannot restore slice from %v", st.Kind())
+	}
+
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap()))
+	for i := 0; i < sv.Len(); i++ {
+		if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func pointerFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+
+	if st.Kind() == reflect.Ptr && sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	dv.Set(reflect.New(dt.Elem()))
+	switch st.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return fromUnstructured(sv.Elem(), dv.Elem())
+	default:
+		return fromUnstructured(sv, dv.Elem())
+	}
+}
+
+func structFromUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if st.Kind() != reflect.Map {
+		return fmt.Errorf("cannot restore struct from: %v", st.Kind())
+	}
+
+	for i := 0; i < dt.NumField(); i++ {
+		fieldInfo := fieldInfoFromField(dt, i)
+		fv := dv.Field(i)
+
+		if len(fieldInfo.name) == 0 {
+			// This field is inlined.
+			if err := fromUnstructured(sv, fv); err != nil {
+				return err
+			}
+		} else {
+			value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue))
+			if value.IsValid() {
+				if err := fromUnstructured(value, fv); err != nil {
+					return err
+				}
+			} else {
+				fv.Set(reflect.Zero(fv.Type()))
+			}
+		}
+	}
+	return nil
+}
+
+func interfaceFromUnstructured(sv, dv reflect.Value) error {
+	// TODO: Is this conversion safe?
+	dv.Set(sv)
+	return nil
+}
+
+// ToUnstructured converts an object into map[string]interface{} representation.
+// It uses encoding/json/Marshaler if object implements it or reflection if not.
+func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) {
+	var u map[string]interface{}
+	var err error
+	if unstr, ok := obj.(Unstructured); ok {
+		u = unstr.UnstructuredContent()
+	} else {
+		t := reflect.TypeOf(obj)
+		value := reflect.ValueOf(obj)
+		if t.Kind() != reflect.Ptr || value.IsNil() {
+			return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t)
+		}
+		u = map[string]interface{}{}
+		err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem())
+	}
+	if c.mismatchDetection {
+		newUnstr := map[string]interface{}{}
+		newErr := toUnstructuredViaJSON(obj, &newUnstr)
+		if (err != nil) != (newErr != nil) {
+			klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
+		}
+		if err == nil && !c.comparison.DeepEqual(u, newUnstr) {
+			klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return u, nil
+}
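
Illustrative sketch of DefaultUnstructuredConverter round-tripping a typed value; the widget type and package name are made up.

```go
package convutil

import "k8s.io/apimachinery/pkg/runtime"

// widget is a made-up typed struct used only to illustrate the converter.
type widget struct {
	Name  string `json:"name"`
	Count int64  `json:"count,omitempty"`
}

// roundTripWidget converts a typed value to its map form and back again.
func roundTripWidget() (*widget, error) {
	in := widget{Name: "w1", Count: 3}
	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&in)
	if err != nil {
		return nil, err
	}
	out := &widget{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, out); err != nil {
		return nil, err
	}
	return out, nil
}
```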
+
+// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains
+// types produced by json.Unmarshal() and also int64.
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil
+func DeepCopyJSON(x map[string]interface{}) map[string]interface{} {
+	return DeepCopyJSONValue(x).(map[string]interface{})
+}
+
+// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains
+// types produced by json.Unmarshal() and also int64.
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil
+func DeepCopyJSONValue(x interface{}) interface{} {
+	switch x := x.(type) {
+	case map[string]interface{}:
+		if x == nil {
+			// Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil
+			return x
+		}
+		clone := make(map[string]interface{}, len(x))
+		for k, v := range x {
+			clone[k] = DeepCopyJSONValue(v)
+		}
+		return clone
+	case []interface{}:
+		if x == nil {
+			// Typed nil - an interface{} that contains a type []interface{} with a value of nil
+			return x
+		}
+		clone := make([]interface{}, len(x))
+		for i, v := range x {
+			clone[i] = DeepCopyJSONValue(v)
+		}
+		return clone
+	case string, int64, bool, float64, nil, encodingjson.Number:
+		return x
+	default:
+		panic(fmt.Errorf("cannot deep copy %T", x))
+	}
+}
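
Illustrative sketch of DeepCopyJSON; the map contents and helper name are made up, and numeric values must already be int64/float64 (or json.Number) to avoid the panic above.

```go
package convutil

import "k8s.io/apimachinery/pkg/runtime"

// copySpec deep copies an unstructured map; mutating the copy afterwards
// leaves the original untouched.
func copySpec(obj map[string]interface{}) map[string]interface{} {
	cp := runtime.DeepCopyJSON(obj)
	cp["replicas"] = int64(5) // obj keeps its original value
	return cp
}
```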
+
+func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, u)
+}
+
+func toUnstructured(sv, dv reflect.Value) error {
+	// Check if the object has a custom string converter.
+	entry := value.TypeReflectEntryOf(sv.Type())
+	if entry.CanConvertToUnstructured() {
+		v, err := entry.ToUnstructured(sv)
+		if err != nil {
+			return err
+		}
+		if v != nil {
+			dv.Set(reflect.ValueOf(v))
+		}
+		return nil
+	}
+	st := sv.Type()
+	switch st.Kind() {
+	case reflect.String:
+		dv.Set(reflect.ValueOf(sv.String()))
+		return nil
+	case reflect.Bool:
+		dv.Set(reflect.ValueOf(sv.Bool()))
+		return nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		dv.Set(reflect.ValueOf(sv.Int()))
+		return nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		uVal := sv.Uint()
+		if uVal > math.MaxInt64 {
+			return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal)
+		}
+		dv.Set(reflect.ValueOf(int64(uVal)))
+		return nil
+	case reflect.Float32, reflect.Float64:
+		dv.Set(reflect.ValueOf(sv.Float()))
+		return nil
+	case reflect.Map:
+		return mapToUnstructured(sv, dv)
+	case reflect.Slice:
+		return sliceToUnstructured(sv, dv)
+	case reflect.Ptr:
+		return pointerToUnstructured(sv, dv)
+	case reflect.Struct:
+		return structToUnstructured(sv, dv)
+	case reflect.Interface:
+		return interfaceToUnstructured(sv, dv)
+	default:
+		return fmt.Errorf("unrecognized type: %v", st.Kind())
+	}
+}
+
+func mapToUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+		if st.Key().Kind() == reflect.String {
+			switch st.Elem().Kind() {
+			// TODO It should be possible to reuse the slice for primitive types.
+			// However, it panics in the following form.
+			// case reflect.String, reflect.Bool,
+			// 	reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			// 	reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			// 	sv.Set(sv)
+			// 	return nil
+			default:
+				// We need to do a proper conversion.
+			}
+		}
+		dv.Set(reflect.MakeMap(mapStringInterfaceType))
+		dv = dv.Elem()
+		dt = dv.Type()
+	}
+	if dt.Kind() != reflect.Map {
+		return fmt.Errorf("cannot convert struct to: %v", dt.Kind())
+	}
+
+	if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) {
+		return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key())
+	}
+
+	for _, key := range sv.MapKeys() {
+		value := reflect.New(dt.Elem()).Elem()
+		if err := toUnstructured(sv.MapIndex(key), value); err != nil {
+			return err
+		}
+		if st.Key().AssignableTo(dt.Key()) {
+			dv.SetMapIndex(key, value)
+		} else {
+			dv.SetMapIndex(key.Convert(dt.Key()), value)
+		}
+	}
+	return nil
+}
+
+func sliceToUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if sv.IsNil() {
+		dv.Set(reflect.Zero(dt))
+		return nil
+	}
+	if st.Elem().Kind() == reflect.Uint8 {
+		dv.Set(reflect.New(stringType))
+		data, err := json.Marshal(sv.Bytes())
+		if err != nil {
+			return err
+		}
+		var result string
+		if err = json.Unmarshal(data, &result); err != nil {
+			return err
+		}
+		dv.Set(reflect.ValueOf(result))
+		return nil
+	}
+	if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+		switch st.Elem().Kind() {
+		// TODO It should be possible to reuse the slice for primitive types.
+		// However, it panics in the following form.
+		// case reflect.String, reflect.Bool,
+		// 	reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		// 	reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		// 	sv.Set(sv)
+		// 	return nil
+		default:
+			// We need to do a proper conversion.
+			dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap()))
+			dv = dv.Elem()
+			dt = dv.Type()
+		}
+	}
+	if dt.Kind() != reflect.Slice {
+		return fmt.Errorf("cannot convert slice to: %v", dt.Kind())
+	}
+	for i := 0; i < sv.Len(); i++ {
+		if err := toUnstructured(sv.Index(i), dv.Index(i)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func pointerToUnstructured(sv, dv reflect.Value) error {
+	if sv.IsNil() {
+		// We're done - we don't need to store anything.
+		return nil
+	}
+	return toUnstructured(sv.Elem(), dv)
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Map, reflect.Slice:
+		// TODO: It seems that 0-len maps are ignored in it.
+		return v.IsNil() || v.Len() == 0
+	case reflect.Ptr, reflect.Interface:
+		return v.IsNil()
+	}
+	return false
+}
+
+func structToUnstructured(sv, dv reflect.Value) error {
+	st, dt := sv.Type(), dv.Type()
+	if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
+		dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField()))
+		dv = dv.Elem()
+		dt = dv.Type()
+	}
+	if dt.Kind() != reflect.Map {
+		return fmt.Errorf("cannot convert struct to: %v", dt.Kind())
+	}
+	realMap := dv.Interface().(map[string]interface{})
+
+	for i := 0; i < st.NumField(); i++ {
+		fieldInfo := fieldInfoFromField(st, i)
+		fv := sv.Field(i)
+
+		if fieldInfo.name == "-" {
+			// This field should be skipped.
+			continue
+		}
+		if fieldInfo.omitempty && isZero(fv) {
+			// omitempty fields should be ignored.
+			continue
+		}
+		if len(fieldInfo.name) == 0 {
+			// This field is inlined.
+			if err := toUnstructured(fv, dv); err != nil {
+				return err
+			}
+			continue
+		}
+		switch fv.Type().Kind() {
+		case reflect.String:
+			realMap[fieldInfo.name] = fv.String()
+		case reflect.Bool:
+			realMap[fieldInfo.name] = fv.Bool()
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			realMap[fieldInfo.name] = fv.Int()
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			realMap[fieldInfo.name] = fv.Uint()
+		case reflect.Float32, reflect.Float64:
+			realMap[fieldInfo.name] = fv.Float()
+		default:
+			subv := reflect.New(dt.Elem()).Elem()
+			if err := toUnstructured(fv, subv); err != nil {
+				return err
+			}
+			dv.SetMapIndex(fieldInfo.nameValue, subv)
+		}
+	}
+	return nil
+}
+
+func interfaceToUnstructured(sv, dv reflect.Value) error {
+	if !sv.IsValid() || sv.IsNil() {
+		dv.Set(reflect.Zero(dv.Type()))
+		return nil
+	}
+	return toUnstructured(sv.Elem(), dv)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
new file mode 100644
index 0000000..89feb40
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package runtime includes helper functions for working with API objects
+// that follow the Kubernetes API object conventions, which are:
+//
+// 0. Your API objects have a common metadata struct member, TypeMeta.
+//
+// 1. Your code refers to an internal set of API objects.
+//
+// 2. In a separate package, you have an external set of API objects.
+//
+// 3. The external set is considered to be versioned, and no breaking
+// changes are ever made to it (fields may be added but not changed
+// or removed).
+//
+// 4. As your API evolves, you'll make an additional versioned package
+// with every major change.
+//
+// 5. Versioned packages have conversion functions which convert to
+// and from the internal version.
+//
+// 6. You'll continue to support older versions according to your
+// deprecation policy, and you can easily provide a program/library
+// to update old versions into new versions because of 5.
+//
+// 7. All of your serializations and deserializations are handled in a
+// centralized place.
+//
+// Package runtime provides a conversion helper to make 5 easy, and the
+// Encode/Decode/DecodeInto trio to accomplish 7. You can also register
+// additional "codecs" which use a version of your choice. It's
+// recommended that you register your types with runtime in your
+// package's init function.
+//
+// As a bonus, a few common types useful from all api objects and versions
+// are provided in types.go.
+package runtime // import "k8s.io/apimachinery/pkg/runtime"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
new file mode 100644
index 0000000..7251e65
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"errors"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type encodable struct {
+	E        Encoder `json:"-"`
+	obj      Object
+	versions []schema.GroupVersion
+}
+
+func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() }
+func (e encodable) DeepCopyObject() Object {
+	out := e
+	out.obj = e.obj.DeepCopyObject()
+	copy(out.versions, e.versions)
+	return out
+}
+
+// NewEncodable creates an object that will be encoded with the provided codec on demand.
+// Provided as a convenience for test cases dealing with internal objects.
+func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object {
+	if _, ok := obj.(*Unknown); ok {
+		return obj
+	}
+	return encodable{e, obj, versions}
+}
+
+func (e encodable) UnmarshalJSON(in []byte) error {
+	return errors.New("runtime.encodable cannot be unmarshalled from JSON")
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (e encodable) MarshalJSON() ([]byte, error) {
+	return Encode(e.E, e.obj)
+}
+
+// NewEncodableList creates an object that will be encoded with the provided codec on demand.
+// Provided as a convenience for test cases dealing with internal objects.
+func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object {
+	out := make([]Object, len(objects))
+	for i := range objects {
+		if _, ok := objects[i].(*Unknown); ok {
+			out[i] = objects[i]
+			continue
+		}
+		out[i] = NewEncodable(e, objects[i], versions...)
+	}
+	return out
+}
+
+func (e *Unknown) UnmarshalJSON(in []byte) error {
+	if e == nil {
+		return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer")
+	}
+	e.TypeMeta = TypeMeta{}
+	e.Raw = append(e.Raw[0:0], in...)
+	e.ContentEncoding = ""
+	e.ContentType = ContentTypeJSON
+	return nil
+}
+
+// Marshal may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (e Unknown) MarshalJSON() ([]byte, error) {
+	// If ContentType is unset, we assume this is JSON.
+	if e.ContentType != "" && e.ContentType != ContentTypeJSON {
+		return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data")
+	}
+	if e.Raw == nil {
+		return []byte("null"), nil
+	}
+	return e.Raw, nil
+}
+
+func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error {
+	if in == nil {
+		out.Raw = []byte("null")
+		return nil
+	}
+	obj := *in
+	if unk, ok := obj.(*Unknown); ok {
+		if unk.Raw != nil {
+			out.Raw = unk.Raw
+			return nil
+		}
+		obj = out.Object
+	}
+	if obj == nil {
+		out.Raw = nil
+		return nil
+	}
+	out.Object = obj
+	return nil
+}
+
+func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error {
+	if in.Object != nil {
+		*out = in.Object
+		return nil
+	}
+	data := in.Raw
+	if len(data) == 0 || (len(data) == 4 && string(data) == "null") {
+		*out = nil
+		return nil
+	}
+	*out = &Unknown{
+		Raw: data,
+		// TODO: Set ContentEncoding and ContentType appropriately.
+		// Currently we set ContentTypeJSON to make tests passing.
+		ContentType: ContentTypeJSON,
+	}
+	return nil
+}
+
+func RegisterEmbeddedConversions(s *Scheme) error {
+	if err := s.AddConversionFunc((*Object)(nil), (*RawExtension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_runtime_Object_To_runtime_RawExtension(a.(*Object), b.(*RawExtension), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*RawExtension)(nil), (*Object)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_runtime_RawExtension_To_runtime_Object(a.(*RawExtension), b.(*Object), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
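
Illustrative sketch of the RawExtension-to-Object conversion above; the conversion scope is unused by this helper, so nil is passed for brevity, and the function name is made up.

```go
package convutil

import "k8s.io/apimachinery/pkg/runtime"

// rawToObject turns raw JSON into a *runtime.Unknown carrier via the
// registered embedded conversion helper.
func rawToObject(raw []byte) (runtime.Object, error) {
	in := runtime.RawExtension{Raw: raw}
	var out runtime.Object
	err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in, &out, nil)
	return out, err
}
```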
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go
new file mode 100644
index 0000000..be0c5ed
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go
@@ -0,0 +1,151 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type notRegisteredErr struct {
+	schemeName string
+	gvk        schema.GroupVersionKind
+	target     GroupVersioner
+	t          reflect.Type
+}
+
+func NewNotRegisteredErrForKind(schemeName string, gvk schema.GroupVersionKind) error {
+	return &notRegisteredErr{schemeName: schemeName, gvk: gvk}
+}
+
+func NewNotRegisteredErrForType(schemeName string, t reflect.Type) error {
+	return &notRegisteredErr{schemeName: schemeName, t: t}
+}
+
+func NewNotRegisteredErrForTarget(schemeName string, t reflect.Type, target GroupVersioner) error {
+	return &notRegisteredErr{schemeName: schemeName, t: t, target: target}
+}
+
+func NewNotRegisteredGVKErrForTarget(schemeName string, gvk schema.GroupVersionKind, target GroupVersioner) error {
+	return &notRegisteredErr{schemeName: schemeName, gvk: gvk, target: target}
+}
+
+func (k *notRegisteredErr) Error() string {
+	if k.t != nil && k.target != nil {
+		return fmt.Sprintf("%v is not suitable for converting to %q in scheme %q", k.t, k.target, k.schemeName)
+	}
+	nullGVK := schema.GroupVersionKind{}
+	if k.gvk != nullGVK && k.target != nil {
+		return fmt.Sprintf("%q is not suitable for converting to %q in scheme %q", k.gvk.GroupVersion(), k.target, k.schemeName)
+	}
+	if k.t != nil {
+		return fmt.Sprintf("no kind is registered for the type %v in scheme %q", k.t, k.schemeName)
+	}
+	if len(k.gvk.Kind) == 0 {
+		return fmt.Sprintf("no version %q has been registered in scheme %q", k.gvk.GroupVersion(), k.schemeName)
+	}
+	if k.gvk.Version == APIVersionInternal {
+		return fmt.Sprintf("no kind %q is registered for the internal version of group %q in scheme %q", k.gvk.Kind, k.gvk.Group, k.schemeName)
+	}
+
+	return fmt.Sprintf("no kind %q is registered for version %q in scheme %q", k.gvk.Kind, k.gvk.GroupVersion(), k.schemeName)
+}
+
+// IsNotRegisteredError returns true if the error indicates the provided
+// object or input data is not registered.
+func IsNotRegisteredError(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*notRegisteredErr)
+	return ok
+}
+
+type missingKindErr struct {
+	data string
+}
+
+func NewMissingKindErr(data string) error {
+	return &missingKindErr{data}
+}
+
+func (k *missingKindErr) Error() string {
+	return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data)
+}
+
+// IsMissingKind returns true if the error indicates that the provided object
+// is missing a 'Kind' field.
+func IsMissingKind(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*missingKindErr)
+	return ok
+}
+
+type missingVersionErr struct {
+	data string
+}
+
+func NewMissingVersionErr(data string) error {
+	return &missingVersionErr{data}
+}
+
+func (k *missingVersionErr) Error() string {
+	return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data)
+}
+
+// IsMissingVersion returns true if the error indicates that the provided object
+// is missing a 'Version' field.
+func IsMissingVersion(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*missingVersionErr)
+	return ok
+}
+
+// strictDecodingError is a base error type that is returned by a strict Decoder such
+// as UniversalStrictDecoder.
+type strictDecodingError struct {
+	message string
+	data    string
+}
+
+// NewStrictDecodingError creates a new strictDecodingError object.
+func NewStrictDecodingError(message string, data string) error {
+	return &strictDecodingError{
+		message: message,
+		data:    data,
+	}
+}
+
+func (e *strictDecodingError) Error() string {
+	return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message)
+}
+
+// IsStrictDecodingError returns true if the error indicates that the provided object
+// has strictness violations.
+func IsStrictDecodingError(err error) bool {
+	if err == nil {
+		return false
+	}
+	_, ok := err.(*strictDecodingError)
+	return ok
+}
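
Illustrative sketch mapping the error predicates above onto short descriptions; the function name is made up.

```go
package convutil

import "k8s.io/apimachinery/pkg/runtime"

// describeDecodeError classifies a decode failure using the predicates above.
func describeDecodeError(err error) string {
	switch {
	case err == nil:
		return "ok"
	case runtime.IsNotRegisteredError(err):
		return "kind not registered in the scheme"
	case runtime.IsMissingKind(err):
		return "payload has no kind"
	case runtime.IsMissingVersion(err):
		return "payload has no apiVersion"
	case runtime.IsStrictDecodingError(err):
		return "strictness violation (unknown or duplicate fields)"
	default:
		return err.Error()
	}
}
```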
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
new file mode 100644
index 0000000..9056397
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+)
+
+func (re *RawExtension) UnmarshalJSON(in []byte) error {
+	if re == nil {
+		return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer")
+	}
+	if !bytes.Equal(in, []byte("null")) {
+		re.Raw = append(re.Raw[0:0], in...)
+	}
+	return nil
+}
+
+// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value.
+// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go
+func (re RawExtension) MarshalJSON() ([]byte, error) {
+	if re.Raw == nil {
+		// TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which
+		// expect to call json.Marshal on arbitrary versioned objects (even those not in
+		// the scheme). pkg/kubectl/resource#AsVersionedObjects and its interaction with
+		// kubectl get on objects not in the scheme needs to be updated to ensure that the
+		// objects that are not part of the scheme are correctly put into the right form.
+		if re.Object != nil {
+			return json.Marshal(re.Object)
+		}
+		return []byte("null"), nil
+	}
+	// TODO: Check whether ContentType is actually JSON before returning it.
+	return re.Raw, nil
+}
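
Illustrative round trip (reviewer sketch, not part of the vendored file): RawExtension keeps nested JSON verbatim. The wrapper type is hypothetical; encoding/json and this runtime package are assumed to be imported.

	type wrapper struct {
		Payload runtime.RawExtension `json:"payload"`
	}

	var w wrapper
	_ = json.Unmarshal([]byte(`{"payload":{"kind":"PluginA","aOption":"foo"}}`), &w)
	// w.Payload.Raw now holds the nested object's bytes unchanged.
	out, _ := json.Marshal(&w) // MarshalJSON re-emits the stored bytes as-is
	_ = out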
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
new file mode 100644
index 0000000..0719718
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
@@ -0,0 +1,855 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
+
+package runtime
+
+import (
+	fmt "fmt"
+
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *RawExtension) Reset()      { *m = RawExtension{} }
+func (*RawExtension) ProtoMessage() {}
+func (*RawExtension) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9d3c45d7f546725c, []int{0}
+}
+func (m *RawExtension) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RawExtension) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RawExtension.Merge(m, src)
+}
+func (m *RawExtension) XXX_Size() int {
+	return m.Size()
+}
+func (m *RawExtension) XXX_DiscardUnknown() {
+	xxx_messageInfo_RawExtension.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RawExtension proto.InternalMessageInfo
+
+func (m *TypeMeta) Reset()      { *m = TypeMeta{} }
+func (*TypeMeta) ProtoMessage() {}
+func (*TypeMeta) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9d3c45d7f546725c, []int{1}
+}
+func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TypeMeta) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TypeMeta.Merge(m, src)
+}
+func (m *TypeMeta) XXX_Size() int {
+	return m.Size()
+}
+func (m *TypeMeta) XXX_DiscardUnknown() {
+	xxx_messageInfo_TypeMeta.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
+
+func (m *Unknown) Reset()      { *m = Unknown{} }
+func (*Unknown) ProtoMessage() {}
+func (*Unknown) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9d3c45d7f546725c, []int{2}
+}
+func (m *Unknown) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Unknown) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Unknown.Merge(m, src)
+}
+func (m *Unknown) XXX_Size() int {
+	return m.Size()
+}
+func (m *Unknown) XXX_DiscardUnknown() {
+	xxx_messageInfo_Unknown.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Unknown proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension")
+	proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta")
+	proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c)
+}
+
+var fileDescriptor_9d3c45d7f546725c = []byte{
+	// 378 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31,
+	0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5,
+	0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74,
+	0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65,
+	0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b,
+	0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05,
+	0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc,
+	0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b,
+	0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd,
+	0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff,
+	0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20,
+	0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64,
+	0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1,
+	0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1,
+	0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd,
+	0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98,
+	0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e,
+	0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19,
+	0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f,
+	0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3,
+	0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68,
+	0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6,
+	0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00,
+	0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00,
+}
+
+func (m *RawExtension) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Raw != nil {
+		i -= len(m.Raw)
+		copy(dAtA[i:], m.Raw)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TypeMeta) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Unknown) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Unknown) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ContentType)
+	copy(dAtA[i:], m.ContentType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.ContentEncoding)
+	copy(dAtA[i:], m.ContentEncoding)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Raw != nil {
+		i -= len(m.Raw)
+		copy(dAtA[i:], m.Raw)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *RawExtension) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Raw != nil {
+		l = len(m.Raw)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *TypeMeta) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Unknown) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.TypeMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Raw != nil {
+		l = len(m.Raw)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.ContentEncoding)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ContentType)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *RawExtension) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RawExtension{`,
+		`Raw:` + valueToStringGenerated(this.Raw) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TypeMeta) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TypeMeta{`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Unknown) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Unknown{`,
+		`TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`,
+		`Raw:` + valueToStringGenerated(this.Raw) + `,`,
+		`ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`,
+		`ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *RawExtension) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RawExtension: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
+			if m.Raw == nil {
+				m.Raw = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TypeMeta) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Unknown) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Unknown: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.TypeMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...)
+			if m.Raw == nil {
+				m.Raw = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContentEncoding = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContentType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
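
Illustrative usage (reviewer sketch, not part of the generated file): the generated methods round-trip a TypeMeta through its proto encoding.

	in := runtime.TypeMeta{APIVersion: "v1", Kind: "MyAPIObject"}
	data, err := in.Marshal() // length-delimited proto encoding of both fields
	if err != nil {
		panic(err)
	}
	var out runtime.TypeMeta
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	// out now holds the same apiVersion and kind as in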
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
new file mode 100644
index 0000000..0e212ec
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
@@ -0,0 +1,127 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.runtime;
+
+// Package-wide variables from generator "generated".
+option go_package = "runtime";
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+// 	MyPlugin runtime.Object `json:"myPlugin"`
+// }
+// type PluginA struct {
+// 	AOption string `json:"aOption"`
+// }
+//
+// // External package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+// 	MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+// type PluginA struct {
+// 	AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+// {
+// 	"kind":"MyAPIObject",
+// 	"apiVersion":"v1",
+// 	"myPlugin": {
+// 		"kind":"PluginA",
+// 		"aOption":"foo",
+// 	},
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+// +k8s:openapi-gen=true
+message RawExtension {
+  // Raw is the underlying serialization of this object.
+  //
+  // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+  optional bytes raw = 1;
+}
+
+// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
+// like this:
+// type MyAwesomeAPIObject struct {
+//      runtime.TypeMeta    `json:",inline"`
+//      ... // other fields
+// }
+// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
+//
+// TypeMeta is provided here for convenience. You may use it directly from this package or define
+// your own with the same fields.
+//
+// +k8s:deepcopy-gen=false
+// +protobuf=true
+// +k8s:openapi-gen=true
+message TypeMeta {
+  // +optional
+  optional string apiVersion = 1;
+
+  // +optional
+  optional string kind = 2;
+}
+
+// Unknown allows api objects with unknown types to be passed-through. This can be used
+// to deal with the API objects from a plug-in. Unknown objects still have functioning
+// TypeMeta features-- kind, version, etc.
+// TODO: Make this object have easy access to field-based accessors and setters for
+// metadata and field mutation.
+//
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=true
+// +k8s:openapi-gen=true
+message Unknown {
+  optional TypeMeta typeMeta = 1;
+
+  // Raw will hold the complete serialized object which couldn't be matched
+  // with a registered type. Most likely, nothing should be done with this
+  // except for passing it through the system.
+  optional bytes raw = 2;
+
+  // ContentEncoding is the encoding used to encode 'Raw' data.
+  // Unspecified means no encoding.
+  optional string contentEncoding = 3;
+
+  // ContentType is the serialization method used to serialize 'Raw'.
+  // Unspecified means ContentTypeJSON.
+  optional string contentType = 4;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
new file mode 100644
index 0000000..7bd1a3a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/errors"
+)
+
+// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path.
+type unsafeObjectConvertor struct {
+	*Scheme
+}
+
+var _ ObjectConvertor = unsafeObjectConvertor{}
+
+// ConvertToVersion converts in to the provided outVersion without copying the input first, which
+// is only safe if the output object is not mutated or reused.
+func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) {
+	return c.Scheme.UnsafeConvertToVersion(in, outVersion)
+}
+
+// UnsafeObjectConvertor performs object conversion without copying the object structure,
+// for use when the converted object will not be reused or mutated. Primarily for use within
+// versioned codecs, which use the external object for serialization but do not return it.
+func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor {
+	return unsafeObjectConvertor{scheme}
+}
+
+// SetField puts the value of src, into fieldName, which must be a member of v.
+// The value of src must be assignable to the field.
+func SetField(src interface{}, v reflect.Value, fieldName string) error {
+	field := v.FieldByName(fieldName)
+	if !field.IsValid() {
+		return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
+	}
+	srcValue := reflect.ValueOf(src)
+	if srcValue.Type().AssignableTo(field.Type()) {
+		field.Set(srcValue)
+		return nil
+	}
+	if srcValue.Type().ConvertibleTo(field.Type()) {
+		field.Set(srcValue.Convert(field.Type()))
+		return nil
+	}
+	return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type())
+}
+
+// Field puts the value of fieldName, which must be a member of v, into dest,
+// which must be a variable to which this field's value can be assigned.
+func Field(v reflect.Value, fieldName string, dest interface{}) error {
+	field := v.FieldByName(fieldName)
+	if !field.IsValid() {
+		return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
+	}
+	destValue, err := conversion.EnforcePtr(dest)
+	if err != nil {
+		return err
+	}
+	if field.Type().AssignableTo(destValue.Type()) {
+		destValue.Set(field)
+		return nil
+	}
+	if field.Type().ConvertibleTo(destValue.Type()) {
+		destValue.Set(field.Convert(destValue.Type()))
+		return nil
+	}
+	return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type())
+}
+
+// FieldPtr puts the address of fieldName, which must be a member of v,
+// into dest, which must be an address of a variable to which this field's
+// address can be assigned.
+func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error {
+	field := v.FieldByName(fieldName)
+	if !field.IsValid() {
+		return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
+	}
+	v, err := conversion.EnforcePtr(dest)
+	if err != nil {
+		return err
+	}
+	field = field.Addr()
+	if field.Type().AssignableTo(v.Type()) {
+		v.Set(field)
+		return nil
+	}
+	if field.Type().ConvertibleTo(v.Type()) {
+		v.Set(field.Convert(v.Type()))
+		return nil
+	}
+	return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type())
+}
+
+// EncodeList ensures that each object in an array is converted to an Unknown{} in serialized form.
+// TODO: accept a content type.
+func EncodeList(e Encoder, objects []Object) error {
+	var errs []error
+	for i := range objects {
+		data, err := Encode(e, objects[i])
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		// TODO: Set ContentEncoding and ContentType.
+		objects[i] = &Unknown{Raw: data}
+	}
+	return errors.NewAggregate(errs)
+}
+
+func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) {
+	for _, decoder := range decoders {
+		// TODO: Decode based on ContentType.
+		obj, err := Decode(decoder, obj.Raw)
+		if err != nil {
+			if IsNotRegisteredError(err) {
+				continue
+			}
+			return nil, err
+		}
+		return obj, nil
+	}
+	// could not decode, so leave the object as Unknown, but give the decoders the
+	// chance to set Unknown.TypeMeta if it is available.
+	for _, decoder := range decoders {
+		if err := DecodeInto(decoder, obj.Raw, obj); err == nil {
+			return obj, nil
+		}
+	}
+	return obj, nil
+}
+
+// DecodeList alters the list in place, attempting to decode any objects found in
+// the list that have the Unknown type. Any errors that occur are returned
+// after the entire list is processed. Decoders are tried in order.
+func DecodeList(objects []Object, decoders ...Decoder) []error {
+	errs := []error(nil)
+	for i, obj := range objects {
+		switch t := obj.(type) {
+		case *Unknown:
+			decoded, err := decodeListItem(t, decoders)
+			if err != nil {
+				errs = append(errs, err)
+				break
+			}
+			objects[i] = decoded
+		}
+	}
+	return errs
+}
+
+// MultiObjectTyper returns the types of objects across multiple schemes in order.
+type MultiObjectTyper []ObjectTyper
+
+var _ ObjectTyper = MultiObjectTyper{}
+
+func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) {
+	for _, t := range m {
+		gvks, unversionedType, err = t.ObjectKinds(obj)
+		if err == nil {
+			return
+		}
+	}
+	return
+}
+
+func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool {
+	for _, t := range m {
+		if t.Recognizes(gvk) {
+			return true
+		}
+	}
+	return false
+}
+
+// SetZeroValue sets the object pointed to by objPtr to the zero value of its type.
+func SetZeroValue(objPtr Object) error {
+	v, err := conversion.EnforcePtr(objPtr)
+	if err != nil {
+		return err
+	}
+	v.Set(reflect.Zero(v.Type()))
+	return nil
+}
+
+// DefaultFramer is valid for any stream that can read objects serially without
+// any separation in the stream.
+var DefaultFramer = defaultFramer{}
+
+type defaultFramer struct{}
+
+func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r }
+func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer         { return w }
+
+// WithVersionEncoder serializes an object and ensures the GVK is set.
+type WithVersionEncoder struct {
+	Version GroupVersioner
+	Encoder
+	ObjectTyper
+}
+
+// Encode does not do conversion. It sets the gvk during serialization.
+func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error {
+	gvks, _, err := e.ObjectTyper.ObjectKinds(obj)
+	if err != nil {
+		if IsNotRegisteredError(err) {
+			return e.Encoder.Encode(obj, stream)
+		}
+		return err
+	}
+	kind := obj.GetObjectKind()
+	oldGVK := kind.GroupVersionKind()
+	gvk := gvks[0]
+	if e.Version != nil {
+		preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks)
+		if ok {
+			gvk = preferredGVK
+		}
+	}
+	kind.SetGroupVersionKind(gvk)
+	err = e.Encoder.Encode(obj, stream)
+	kind.SetGroupVersionKind(oldGVK)
+	return err
+}
+
+// WithoutVersionDecoder clears the group version kind of a deserialized object.
+type WithoutVersionDecoder struct {
+	Decoder
+}
+
+// Decode does not do conversion. It removes the gvk during deserialization.
+func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+	obj, gvk, err := d.Decoder.Decode(data, defaults, into)
+	if obj != nil {
+		kind := obj.GetObjectKind()
+		// clearing the gvk is just a convention of a codec
+		kind.SetGroupVersionKind(schema.GroupVersionKind{})
+	}
+	return obj, gvk, err
+}
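
Illustrative usage (reviewer sketch, not part of the vendored file) of the reflection helpers above; the pod struct is hypothetical and the reflect package is assumed to be imported.

	type pod struct{ Name string }

	p := pod{Name: "bbsim-olt-0"}
	var name string
	if err := runtime.Field(reflect.ValueOf(p), "Name", &name); err != nil {
		panic(err)
	}
	// name == "bbsim-olt-0"

	// SetField writes in the opposite direction and needs an addressable struct value.
	if err := runtime.SetField("bbsim-olt-1", reflect.ValueOf(&p).Elem(), "Name"); err != nil {
		panic(err)
	}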
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
new file mode 100644
index 0000000..f44693c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -0,0 +1,344 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"io"
+	"net/url"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	// APIVersionInternal may be used if you are registering a type that should not
+	// be considered stable or serialized - it is a convention only and has no
+	// special behavior in this package.
+	APIVersionInternal = "__internal"
+)
+
+// GroupVersioner refines a set of possible conversion targets into a single option.
+type GroupVersioner interface {
+	// KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no
+	// target is known. In general, if the return target is not in the input list, the caller is expected to invoke
+	// Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type.
+	// Sophisticated implementations may use additional information about the input kinds to pick a destination kind.
+	KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool)
+	// Identifier returns a string representation of the object.
+	// Identifiers of two different encoders should be equal only if, for every
+	// input kind list, they return the same result.
+	Identifier() string
+}
+
+// Identifier represents an identifier.
+// Identifiers of two different objects should be equal if and only if for every
+// input the output they produce is exactly the same.
+type Identifier string
+
+// Encoder writes objects to a serialized form
+type Encoder interface {
+	// Encode writes an object to a stream. Implementations may return errors if the versions are
+	// incompatible, or if no conversion is defined.
+	Encode(obj Object, w io.Writer) error
+	// Identifier returns an identifier of the encoder.
+	// Identifiers of two different encoders should be equal if and only if for every input
+	// object it will be encoded to the same representation by both of them.
+	//
+	// Identifier is intended for use with the CacheableObject#CacheEncode method. In order to
+	// correctly handle CacheableObject, the Encode() method should look similar to below, where
+	// doEncode() is the encoding logic of implemented encoder:
+	//   func (e *MyEncoder) Encode(obj Object, w io.Writer) error {
+	//     if co, ok := obj.(CacheableObject); ok {
+	//       return co.CacheEncode(e.Identifier(), e.doEncode, w)
+	//     }
+	//     return e.doEncode(obj, w)
+	//   }
+	Identifier() Identifier
+}
+
+// Decoder attempts to load an object from data.
+type Decoder interface {
+	// Decode attempts to deserialize the provided data using either the innate typing of the scheme or the
+	// default kind, group, and version provided. It returns a decoded object as well as the kind, group, and
+	// version from the serialized data, or an error. If into is non-nil, it will be used as the target type
+	// and implementations may choose to use it rather than reallocating an object. However, the object is not
+	// guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are
+	// provided, they are applied to the data by default. If no defaults or partial defaults are provided, the
+	// type of the into may be used to guide conversion decisions.
+	Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error)
+}
+
+// Serializer is the core interface for transforming objects into a serialized format and back.
+// Implementations may choose to perform conversion of the object, but no assumptions should be made.
+type Serializer interface {
+	Encoder
+	Decoder
+}
+
+// Codec is a Serializer that deals with the details of versioning objects. It offers the same
+// interface as Serializer, so this is a marker to consumers that care about the version of the objects
+// they receive.
+type Codec Serializer
+
+// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and
+// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing
+// and the desired version must be specified.
+type ParameterCodec interface {
+	// DecodeParameters takes the given url.Values in the specified group version and decodes them
+	// into the provided object, or returns an error.
+	DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error
+	// EncodeParameters encodes the provided object as query parameters or returns an error.
+	EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error)
+}
+
+// Framer is a factory for creating readers and writers that obey a particular framing pattern.
+type Framer interface {
+	NewFrameReader(r io.ReadCloser) io.ReadCloser
+	NewFrameWriter(w io.Writer) io.Writer
+}
+
+// SerializerInfo contains information about a specific serialization format
+type SerializerInfo struct {
+	// MediaType is the value that represents this serializer over the wire.
+	MediaType string
+	// MediaTypeType is the first part of the MediaType ("application" in "application/json").
+	MediaTypeType string
+	// MediaTypeSubType is the second part of the MediaType ("json" in "application/json").
+	MediaTypeSubType string
+	// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
+	EncodesAsText bool
+	// Serializer is the individual object serializer for this media type.
+	Serializer Serializer
+	// PrettySerializer, if set, can serialize this object in a form biased towards
+	// readability.
+	PrettySerializer Serializer
+	// StreamSerializer, if set, describes the streaming serialization format
+	// for this media type.
+	StreamSerializer *StreamSerializerInfo
+}
+
+// StreamSerializerInfo contains information about a specific stream serialization format
+type StreamSerializerInfo struct {
+	// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
+	EncodesAsText bool
+	// Serializer is the top level object serializer for this type when streaming
+	Serializer
+	// Framer is the factory for retrieving streams that separate objects on the wire
+	Framer
+}
+
+// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers
+// for multiple supported media types. This would commonly be accepted by a server component
+// that performs HTTP content negotiation to accept multiple formats.
+type NegotiatedSerializer interface {
+	// SupportedMediaTypes lists the media types supported for reading and writing single objects.
+	SupportedMediaTypes() []SerializerInfo
+
+	// EncoderForVersion returns an encoder that ensures objects being written to the provided
+	// serializer are in the provided group version.
+	EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder
+	// DecoderToVersion returns a decoder that ensures objects being read by the provided
+	// serializer are in the provided group version by default.
+	DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
+}
+
+// ClientNegotiator handles turning an HTTP content type into the appropriate encoder.
+// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from
+// a NegotiatedSerializer.
+type ClientNegotiator interface {
+	// Encoder returns the appropriate encoder for the provided contentType (e.g. application/json)
+	// and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found
+	// a NegotiateError will be returned. The current client implementations consider params to be
+	// optional modifiers to the contentType and will ignore unrecognized parameters.
+	Encoder(contentType string, params map[string]string) (Encoder, error)
+	// Decoder returns the appropriate decoder for the provided contentType (e.g. application/json)
+	// and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found
+	// a NegotiateError will be returned. The current client implementations consider params to be
+	// optional modifiers to the contentType and will ignore unrecognized parameters.
+	Decoder(contentType string, params map[string]string) (Decoder, error)
+	// StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g.
+	// application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no
+	// serializer is found a NegotiateError will be returned. The Serializer and Framer will always
+	// be returned if a Decoder is returned. The current client implementations consider params to be
+	// optional modifiers to the contentType and will ignore unrecognized parameters.
+	StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error)
+}
+
+// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers
+// that can read and write data at rest. This would commonly be used by client tools that must
+// read files, or server side storage interfaces that persist restful objects.
+type StorageSerializer interface {
+	// SupportedMediaTypes are the media types supported for reading and writing objects.
+	SupportedMediaTypes() []SerializerInfo
+
+	// UniversalDeserializer returns a Serializer that can read objects in multiple supported formats
+	// by introspecting the data at rest.
+	UniversalDeserializer() Decoder
+
+	// EncoderForVersion returns an encoder that ensures objects being written to the provided
+	// serializer are in the provided group version.
+	EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder
+	// DecoderToVersion returns a decoder that ensures objects being read by the provided
+	// serializer are in the provided group version by default.
+	DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
+}
+
+// NestedObjectEncoder is an optional interface that objects may implement to be given
+// an opportunity to encode any nested Objects / RawExtensions during serialization.
+type NestedObjectEncoder interface {
+	EncodeNestedObjects(e Encoder) error
+}
+
+// NestedObjectDecoder is an optional interface that objects may implement to be given
+// an opportunity to decode any nested Objects / RawExtensions during deserialization.
+type NestedObjectDecoder interface {
+	DecodeNestedObjects(d Decoder) error
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Non-codec interfaces
+
+type ObjectDefaulter interface {
+	// Default takes an object (must be a pointer) and applies any default values.
+	// Defaulters may not error.
+	Default(in Object)
+}
+
+type ObjectVersioner interface {
+	ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
+}
+
+// ObjectConvertor converts an object to a different version.
+type ObjectConvertor interface {
+	// Convert attempts to convert one object into another, or returns an error. This
+	// method does not mutate the in object, but the in and out object might share data structures,
+	// i.e. the out object cannot be mutated without mutating the in object as well.
+	// The context argument will be passed to all nested conversions.
+	Convert(in, out, context interface{}) error
+	// ConvertToVersion takes the provided object and converts it to the provided version. This
+	// method does not mutate the in object, but the in and out object might share data structures,
+	// i.e. the out object cannot be mutated without mutating the in object as well.
+	// This method is similar to Convert() but handles specific details of choosing the correct
+	// output version.
+	ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error)
+	ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error)
+}
+
+// ObjectTyper contains methods for extracting the APIVersion and Kind
+// of objects.
+type ObjectTyper interface {
+	// ObjectKinds returns all possible group,version,kind combinations for the provided object, true if
+	// the object is unversioned, or an error if the object is not recognized
+	// (IsNotRegisteredError will return true).
+	ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error)
+	// Recognizes returns true if the scheme is able to handle the provided version and kind,
+	// or more precisely that the provided version is a possible conversion or decoding
+	// target.
+	Recognizes(gvk schema.GroupVersionKind) bool
+}
+
+// ObjectCreater contains methods for instantiating an object by kind and version.
+type ObjectCreater interface {
+	New(kind schema.GroupVersionKind) (out Object, err error)
+}
+
+// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource
+type EquivalentResourceMapper interface {
+	// EquivalentResourcesFor returns a list of resources that address the same underlying data as resource.
+	// If subresource is specified, only equivalent resources which also have the same subresource are included.
+	// The specified resource can be included in the returned list.
+	EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource
+	// KindFor returns the kind expected by the specified resource[/subresource].
+	// A zero value is returned if the kind is unknown.
+	KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind
+}
+
+// EquivalentResourceRegistry provides an EquivalentResourceMapper interface,
+// and allows registering known resource[/subresource] -> kind
+type EquivalentResourceRegistry interface {
+	EquivalentResourceMapper
+	// RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind.
+	RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind)
+}
+
+// ResourceVersioner provides methods for setting and retrieving
+// the resource version from an API object.
+type ResourceVersioner interface {
+	SetResourceVersion(obj Object, version string) error
+	ResourceVersion(obj Object) (string, error)
+}
+
+// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object.
+type SelfLinker interface {
+	SetSelfLink(obj Object, selfLink string) error
+	SelfLink(obj Object) (string, error)
+
+	// Knowing Name is sometimes necessary to use a SelfLinker.
+	Name(obj Object) (string, error)
+	// Knowing Namespace is sometimes necessary to use a SelfLinker.
+	Namespace(obj Object) (string, error)
+}
+
+// Object interface must be supported by all API types registered with Scheme. Since objects in a scheme are
+// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows
+// serializers to set the kind, version, and group the object is represented as. An Object may choose
+// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized.
+type Object interface {
+	GetObjectKind() schema.ObjectKind
+	DeepCopyObject() Object
+}
+
+// CacheableObject allows an object to cache its different serializations
+// to avoid performing the same serialization multiple times.
+type CacheableObject interface {
+	// CacheEncode writes an object to a stream. The <encode> function will
+	// be used in case of cache miss. The <encode> function takes ownership
+	// of the object.
+	// If CacheableObject is a wrapper, then deep-copy of the wrapped object
+	// should be passed to <encode> function.
+	// CacheEncode assumes that for two different calls with the same <id>,
+	// <encode> function will also be the same.
+	CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error
+	// GetObject returns a deep-copy of an object to be encoded - the caller of
+	// GetObject() is the owner of returned object. The reason for making a copy
+	// is to avoid bugs, where caller modifies the object and forgets to copy it,
+	// thus modifying the object for everyone.
+	// The object returned by GetObject should be the same as the one that is supposed
+	// to be passed to <encode> function in CacheEncode method.
+	// If CacheableObject is a wrapper, the copy of wrapped object should be returned.
+	GetObject() Object
+}
+
+// Unstructured objects store values as map[string]interface{}, with only values that can be serialized
+// to JSON allowed.
+type Unstructured interface {
+	Object
+	// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+	// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
+	NewEmptyInstance() Unstructured
+	// UnstructuredContent returns a non-nil map with this object's contents. Values may be
+	// []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to
+	// and from JSON. SetUnstructuredContent should be used to mutate the contents.
+	UnstructuredContent() map[string]interface{}
+	// SetUnstructuredContent updates the object content to match the provided map.
+	SetUnstructuredContent(map[string]interface{})
+	// IsList returns true if this type is a list or matches the list convention - has an array called "items".
+	IsList() bool
+	// EachListItem should pass a single item out of the list as an Object to the provided function. Any
+	// error should terminate the iteration. If IsList() returns false, this method should return an error
+	// instead of calling the provided function.
+	EachListItem(func(Object) error) error
+}
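
For orientation, a minimal sketch (reviewer note, hypothetical type, not part of the vendored file) of what satisfies runtime.Object: GetObjectKind is inherited by embedding metav1.TypeMeta (k8s.io/apimachinery/pkg/apis/meta/v1), and DeepCopyObject is written by hand here instead of being generated.

	type SadisEntry struct {
		metav1.TypeMeta `json:",inline"`
		ID              string `json:"id"`
	}

	// DeepCopyObject returns a copy so callers can mutate it freely.
	func (in *SadisEntry) DeepCopyObject() runtime.Object {
		out := *in
		return &out
	}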
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go
new file mode 100644
index 0000000..3ff8461
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type equivalentResourceRegistry struct {
+	// keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups).
+	// if nil, or if "" is returned, resource.String() is used as the key
+	keyFunc func(resource schema.GroupResource) string
+	// resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources).
+	// main resources are stored with subresource="".
+	resources map[string]map[string][]schema.GroupVersionResource
+	// kinds maps resource -> subresource -> kind
+	kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind
+	// keys caches the computed key for each GroupResource
+	keys map[schema.GroupResource]string
+
+	mutex sync.RWMutex
+}
+
+var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil)
+var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil)
+
+// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent.
+func NewEquivalentResourceRegistry() EquivalentResourceRegistry {
+	return &equivalentResourceRegistry{}
+}
+
+// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function.
+// If "" is returned by the function, GroupResource#String is used as the identity.
+// GroupResources with the same identity string are considered equivalent.
+func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry {
+	return &equivalentResourceRegistry{keyFunc: keyFunc}
+}
+
+func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.resources[r.keys[resource.GroupResource()]][subresource]
+}
+func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.kinds[resource][subresource]
+}
+func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if r.kinds == nil {
+		r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{}
+	}
+	if r.kinds[resource] == nil {
+		r.kinds[resource] = map[string]schema.GroupVersionKind{}
+	}
+	r.kinds[resource][subresource] = kind
+
+	// get the shared key of the parent resource
+	key := ""
+	gr := resource.GroupResource()
+	if r.keyFunc != nil {
+		key = r.keyFunc(gr)
+	}
+	if key == "" {
+		key = gr.String()
+	}
+
+	if r.keys == nil {
+		r.keys = map[schema.GroupResource]string{}
+	}
+	r.keys[gr] = key
+
+	if r.resources == nil {
+		r.resources = map[string]map[string][]schema.GroupVersionResource{}
+	}
+	if r.resources[key] == nil {
+		r.resources[key] = map[string][]schema.GroupVersionResource{}
+	}
+	r.resources[key][subresource] = append(r.resources[key][subresource], resource)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go
new file mode 100644
index 0000000..159b301
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// NegotiateError is returned when a ClientNegotiator is unable to locate
+// a serializer for the requested operation.
+type NegotiateError struct {
+	ContentType string
+	Stream      bool
+}
+
+func (e NegotiateError) Error() string {
+	if e.Stream {
+		return fmt.Sprintf("no stream serializers registered for %s", e.ContentType)
+	}
+	return fmt.Sprintf("no serializers registered for %s", e.ContentType)
+}
+
+type clientNegotiator struct {
+	serializer     NegotiatedSerializer
+	encode, decode GroupVersioner
+}
+
+func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) {
+	// TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method
+	// if client negotiators truly need to use it
+	mediaTypes := n.serializer.SupportedMediaTypes()
+	info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
+	if !ok {
+		if len(contentType) != 0 || len(mediaTypes) == 0 {
+			return nil, NegotiateError{ContentType: contentType}
+		}
+		info = mediaTypes[0]
+	}
+	return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil
+}
+
+func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) {
+	mediaTypes := n.serializer.SupportedMediaTypes()
+	info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
+	if !ok {
+		if len(contentType) != 0 || len(mediaTypes) == 0 {
+			return nil, NegotiateError{ContentType: contentType}
+		}
+		info = mediaTypes[0]
+	}
+	return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil
+}
+
+func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) {
+	mediaTypes := n.serializer.SupportedMediaTypes()
+	info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
+	if !ok {
+		if len(contentType) != 0 || len(mediaTypes) == 0 {
+			return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true}
+		}
+		info = mediaTypes[0]
+	}
+	if info.StreamSerializer == nil {
+		return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true}
+	}
+	return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil
+}
+
+// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or
+// stream decoder for a given content type. Does not perform any conversion, but will
+// encode the object to the desired group, version, and kind. Use when creating a client.
+func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
+	return &clientNegotiator{
+		serializer: serializer,
+		encode:     gv,
+	}
+}
+
+// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver
+// where objects are converted to gv prior to sending and decoded to their internal representation prior
+// to retrieval.
+//
+// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release.
+func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
+	decode := schema.GroupVersions{
+		{
+			Group:   gv.Group,
+			Version: APIVersionInternal,
+		},
+		// always include the legacy group as a decoding target to handle non-error `Status` return types
+		{
+			Group:   "",
+			Version: APIVersionInternal,
+		},
+	}
+	return &clientNegotiator{
+		encode:     gv,
+		decode:     decode,
+		serializer: serializer,
+	}
+}
+
+// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used
+// for testing or when the caller is taking responsibility for setting the GVK on encoded objects.
+func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator {
+	return &clientNegotiator{
+		serializer: &simpleNegotiatedSerializer{info: info},
+		encode:     gv,
+	}
+}
+
+type simpleNegotiatedSerializer struct {
+	info SerializerInfo
+}
+
+func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer {
+	return &simpleNegotiatedSerializer{info: info}
+}
+
+func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo {
+	return []SerializerInfo{n.info}
+}
+
+func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder {
+	return e
+}
+
+func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder {
+	return d
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go
new file mode 100644
index 0000000..1cd2e4c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta
+func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
new file mode 100644
index 0000000..29d3ac4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
@@ -0,0 +1,59 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
+
+package schema
+
+import (
+	fmt "fmt"
+
+	math "math"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d)
+}
+
+var fileDescriptor_0462724132518e0d = []byte{
+	// 185 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30,
+	0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50,
+	0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a,
+	0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68,
+	0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc,
+	0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b,
+	0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e,
+	0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77,
+	0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87,
+	0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00,
+	0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff,
+	0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
new file mode 100644
index 0000000..5aeeaa1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
@@ -0,0 +1,26 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.runtime.schema;
+
+// Package-wide variables from generator "generated".
+option go_package = "schema";
+
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
new file mode 100644
index 0000000..994a3e3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
@@ -0,0 +1,305 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com`
+// and parses it out into both possibilities.  This code takes no responsibility for knowing which representation was intended
+// but with a knowledge of all GroupVersions, calling code can take a very good guess.  If there are only two segments, then
+// `*GroupVersionResource` is nil.
+// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource`
+func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) {
+	var gvr *GroupVersionResource
+	if strings.Count(arg, ".") >= 2 {
+		s := strings.SplitN(arg, ".", 3)
+		gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]}
+	}
+
+	return gvr, ParseGroupResource(arg)
+}
+
+// ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com`
+// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended
+// but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then
+// `*GroupVersionResource` is nil.
+// `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind`
+func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) {
+	var gvk *GroupVersionKind
+	if strings.Count(arg, ".") >= 2 {
+		s := strings.SplitN(arg, ".", 3)
+		gvk = &GroupVersionKind{Group: s[2], Version: s[1], Kind: s[0]}
+	}
+
+	return gvk, ParseGroupKind(arg)
+}
+
+// GroupResource specifies a Group and a Resource, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+type GroupResource struct {
+	Group    string
+	Resource string
+}
+
+func (gr GroupResource) WithVersion(version string) GroupVersionResource {
+	return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource}
+}
+
+func (gr GroupResource) Empty() bool {
+	return len(gr.Group) == 0 && len(gr.Resource) == 0
+}
+
+func (gr GroupResource) String() string {
+	if len(gr.Group) == 0 {
+		return gr.Resource
+	}
+	return gr.Resource + "." + gr.Group
+}
+
+func ParseGroupKind(gk string) GroupKind {
+	i := strings.Index(gk, ".")
+	if i == -1 {
+		return GroupKind{Kind: gk}
+	}
+
+	return GroupKind{Group: gk[i+1:], Kind: gk[:i]}
+}
+
+// ParseGroupResource turns "resource.group" string into a GroupResource struct.  Empty strings are allowed
+// for each field.
+func ParseGroupResource(gr string) GroupResource {
+	if i := strings.Index(gr, "."); i >= 0 {
+		return GroupResource{Group: gr[i+1:], Resource: gr[:i]}
+	}
+	return GroupResource{Resource: gr}
+}
+
+// GroupVersionResource unambiguously identifies a resource.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+type GroupVersionResource struct {
+	Group    string
+	Version  string
+	Resource string
+}
+
+func (gvr GroupVersionResource) Empty() bool {
+	return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0
+}
+
+func (gvr GroupVersionResource) GroupResource() GroupResource {
+	return GroupResource{Group: gvr.Group, Resource: gvr.Resource}
+}
+
+func (gvr GroupVersionResource) GroupVersion() GroupVersion {
+	return GroupVersion{Group: gvr.Group, Version: gvr.Version}
+}
+
+func (gvr GroupVersionResource) String() string {
+	return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "")
+}
+
+// GroupKind specifies a Group and a Kind, but does not force a version.  This is useful for identifying
+// concepts during lookup stages without having partially valid types
+type GroupKind struct {
+	Group string
+	Kind  string
+}
+
+func (gk GroupKind) Empty() bool {
+	return len(gk.Group) == 0 && len(gk.Kind) == 0
+}
+
+func (gk GroupKind) WithVersion(version string) GroupVersionKind {
+	return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind}
+}
+
+func (gk GroupKind) String() string {
+	if len(gk.Group) == 0 {
+		return gk.Kind
+	}
+	return gk.Kind + "." + gk.Group
+}
+
+// GroupVersionKind unambiguously identifies a kind.  It doesn't anonymously include GroupVersion
+// to avoid automatic coercion.  It doesn't use a GroupVersion to avoid custom marshalling
+type GroupVersionKind struct {
+	Group   string
+	Version string
+	Kind    string
+}
+
+// Empty returns true if group, version, and kind are empty
+func (gvk GroupVersionKind) Empty() bool {
+	return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0
+}
+
+func (gvk GroupVersionKind) GroupKind() GroupKind {
+	return GroupKind{Group: gvk.Group, Kind: gvk.Kind}
+}
+
+func (gvk GroupVersionKind) GroupVersion() GroupVersion {
+	return GroupVersion{Group: gvk.Group, Version: gvk.Version}
+}
+
+func (gvk GroupVersionKind) String() string {
+	return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind
+}
+
+// GroupVersion contains the "group" and the "version", which uniquely identifies the API.
+type GroupVersion struct {
+	Group   string
+	Version string
+}
+
+// Empty returns true if group and version are empty
+func (gv GroupVersion) Empty() bool {
+	return len(gv.Group) == 0 && len(gv.Version) == 0
+}
+
+// String puts "group" and "version" into a single "group/version" string. For the legacy v1
+// it returns "v1".
+func (gv GroupVersion) String() string {
+	if len(gv.Group) > 0 {
+		return gv.Group + "/" + gv.Version
+	}
+	return gv.Version
+}
+
+// Identifier implements runtime.GroupVersioner interface.
+func (gv GroupVersion) Identifier() string {
+	return gv.String()
+}
+
+// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
+// if none of the options match the group. It prefers a match to group and version over just group.
+// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
+// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
+//   in fewer places.
+func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) {
+	for _, gvk := range kinds {
+		if gvk.Group == gv.Group && gvk.Version == gv.Version {
+			return gvk, true
+		}
+	}
+	for _, gvk := range kinds {
+		if gvk.Group == gv.Group {
+			return gv.WithKind(gvk.Kind), true
+		}
+	}
+	return GroupVersionKind{}, false
+}
+
+// ParseGroupVersion turns a "group/version" string into a GroupVersion struct. It reports an error
+// if it cannot parse the string.
+func ParseGroupVersion(gv string) (GroupVersion, error) {
+	// this can be the internal version for the legacy kube types
+	// TODO once we've cleared the last uses as strings, this special case should be removed.
+	if (len(gv) == 0) || (gv == "/") {
+		return GroupVersion{}, nil
+	}
+
+	switch strings.Count(gv, "/") {
+	case 0:
+		return GroupVersion{"", gv}, nil
+	case 1:
+		i := strings.Index(gv, "/")
+		return GroupVersion{gv[:i], gv[i+1:]}, nil
+	default:
+		return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv)
+	}
+}
+
+// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind.
+func (gv GroupVersion) WithKind(kind string) GroupVersionKind {
+	return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+}
+
+// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource.
+func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
+	return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource}
+}
+
+// GroupVersions can be used to represent a set of desired group versions.
+// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme.
+// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
+//   in fewer places.
+type GroupVersions []GroupVersion
+
+// Identifier implements runtime.GroupVersioner interface.
+func (gvs GroupVersions) Identifier() string {
+	groupVersions := make([]string, 0, len(gvs))
+	for i := range gvs {
+		groupVersions = append(groupVersions, gvs[i].String())
+	}
+	return fmt.Sprintf("[%s]", strings.Join(groupVersions, ","))
+}
+
+// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
+// if none of the options match the group.
+func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) {
+	var targets []GroupVersionKind
+	for _, gv := range gvs {
+		target, ok := gv.KindForGroupVersionKinds(kinds)
+		if !ok {
+			continue
+		}
+		targets = append(targets, target)
+	}
+	if len(targets) == 1 {
+		return targets[0], true
+	}
+	if len(targets) > 1 {
+		return bestMatch(kinds, targets), true
+	}
+	return GroupVersionKind{}, false
+}
+
+// bestMatch tries to pick best matching GroupVersionKind and falls back to the first
+// found if no exact match exists.
+func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind {
+	for _, gvk := range targets {
+		for _, k := range kinds {
+			if k == gvk {
+				return k
+			}
+		}
+	}
+	return targets[0]
+}
+
+// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that
+// do not use TypeMeta.
+func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) {
+	if gvk.Empty() {
+		return "", ""
+	}
+	return gvk.GroupVersion().String(), gvk.Kind
+}
+
+// FromAPIVersionAndKind returns a GVK representing the provided fields for types that
+// do not use TypeMeta. This method exists to support test types and legacy serializations
+// that have a distinct group and kind.
+// TODO: further reduce usage of this method.
+func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind {
+	if gv, err := ParseGroupVersion(apiVersion); err == nil {
+		return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind}
+	}
+	return GroupVersionKind{Kind: kind}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go
new file mode 100644
index 0000000..b570668
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// All objects that are serialized from a Scheme encode their type information. This interface is used
+// by serialization to set type information from the Scheme onto the serialized version of an object.
+// For objects that cannot be serialized or have unique requirements, this interface may be a no-op.
+type ObjectKind interface {
+	// SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil
+	// should clear the current setting.
+	SetGroupVersionKind(kind GroupVersionKind)
+	// GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does
+	// not expose or provide these fields.
+	GroupVersionKind() GroupVersionKind
+}
+
+// EmptyObjectKind implements the ObjectKind interface as a noop
+var EmptyObjectKind = emptyObjectKind{}
+
+type emptyObjectKind struct{}
+
+// SetGroupVersionKind implements the ObjectKind interface
+func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {}
+
+// GroupVersionKind implements the ObjectKind interface
+func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} }
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
new file mode 100644
index 0000000..3b25496
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -0,0 +1,728 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/naming"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Scheme defines methods for serializing and deserializing API objects, a type
+// registry for converting group, version, and kind information to and from Go
+// schemas, and mappings between Go schemas of different versions. A scheme is the
+// foundation for a versioned API and versioned configuration over time.
+//
+// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time
+// identifier for a particular representation of that Type (typically backwards
+// compatible), a Kind is the unique name for that Type within the Version, and a
+// Group identifies a set of Versions, Kinds, and Types that evolve over time. An
+// Unversioned Type is one that is not yet formally bound to a type and is promised
+// to be backwards compatible (effectively a "v1" of a Type that does not expect
+// to break in the future).
+//
+// Schemes are not expected to change at runtime and are only threadsafe after
+// registration is complete.
+type Scheme struct {
+	// versionMap allows one to figure out the go type of an object with
+	// the given version and name.
+	gvkToType map[schema.GroupVersionKind]reflect.Type
+
+	// typeToGroupVersion allows one to find metadata for a given go object.
+	// The reflect.Type we index by should *not* be a pointer.
+	typeToGVK map[reflect.Type][]schema.GroupVersionKind
+
+	// unversionedTypes are transformed without conversion in ConvertToVersion.
+	unversionedTypes map[reflect.Type]schema.GroupVersionKind
+
+	// unversionedKinds are the names of kinds that can be created in the context of any group
+	// or version
+	// TODO: resolve the status of unversioned types.
+	unversionedKinds map[string]reflect.Type
+
+	// Map from version and resource to the corresponding func to convert
+	// resource field labels in that version to internal version.
+	fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc
+
+	// defaulterFuncs is an array of interfaces to be called with an object to provide defaulting
+	// the provided object must be a pointer.
+	defaulterFuncs map[reflect.Type]func(interface{})
+
+	// converter stores all registered conversion functions. It also has
+	// default converting behavior.
+	converter *conversion.Converter
+
+	// versionPriority is a map of groups to ordered lists of versions for those groups indicating the
+	// default priorities of these versions as registered in the scheme
+	versionPriority map[string][]string
+
+	// observedVersions keeps track of the order we've seen versions during type registration
+	observedVersions []schema.GroupVersion
+
+	// schemeName is the name of this scheme.  If you don't specify a name, the stack of the NewScheme caller will be used.
+	// This is useful for error reporting to indicate the origin of the scheme.
+	schemeName string
+}
+
+// FieldLabelConversionFunc converts a field selector to internal representation.
+type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error)
+
+// NewScheme creates a new Scheme. This scheme is pluggable by default.
+func NewScheme() *Scheme {
+	s := &Scheme{
+		gvkToType:                 map[schema.GroupVersionKind]reflect.Type{},
+		typeToGVK:                 map[reflect.Type][]schema.GroupVersionKind{},
+		unversionedTypes:          map[reflect.Type]schema.GroupVersionKind{},
+		unversionedKinds:          map[string]reflect.Type{},
+		fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
+		defaulterFuncs:            map[reflect.Type]func(interface{}){},
+		versionPriority:           map[string][]string{},
+		schemeName:                naming.GetNameFromCallsite(internalPackages...),
+	}
+	s.converter = conversion.NewConverter(s.nameFunc)
+
+	// Enable a couple of default conversions by default.
+	utilruntime.Must(RegisterEmbeddedConversions(s))
+	utilruntime.Must(RegisterStringConversions(s))
+
+	utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
+	utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
+	return s
+}
+
+// nameFunc returns the name of the type that we wish to use to determine when two types attempt
+// a conversion. Defaults to the go name of the type if the type is not registered.
+func (s *Scheme) nameFunc(t reflect.Type) string {
+	// find the preferred names for this type
+	gvks, ok := s.typeToGVK[t]
+	if !ok {
+		return t.Name()
+	}
+
+	for _, gvk := range gvks {
+		internalGV := gvk.GroupVersion()
+		internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in
+		internalGVK := internalGV.WithKind(gvk.Kind)
+
+		if internalType, exists := s.gvkToType[internalGVK]; exists {
+			return s.typeToGVK[internalType][0].Kind
+		}
+	}
+
+	return gvks[0].Kind
+}
+
+// fromScope gets the input version, desired output version, and desired Scheme
+// from a conversion.Scope.
+func (s *Scheme) fromScope(scope conversion.Scope) *Scheme {
+	return s
+}
+
+// Converter allows access to the converter for the scheme
+func (s *Scheme) Converter() *conversion.Converter {
+	return s.converter
+}
+
+// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules.
+// Whenever an object of this type is serialized, it is serialized with the provided group version and is not
+// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an
+// API group and version that would never be updated.
+//
+// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
+//   every version with particular schemas. Resolve this method at that point.
+func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) {
+	s.addObservedVersion(version)
+	s.AddKnownTypes(version, types...)
+	for _, obj := range types {
+		t := reflect.TypeOf(obj).Elem()
+		gvk := version.WithKind(t.Name())
+		s.unversionedTypes[t] = gvk
+		if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old {
+			panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique in scheme %q", old.PkgPath(), old.Name(), gvk, s.schemeName))
+		}
+		s.unversionedKinds[gvk.Kind] = t
+	}
+}
+
+// AddKnownTypes registers all types passed in 'types' as being members of version 'version'.
+// All objects passed to types should be pointers to structs. The name that go reports for
+// the struct becomes the "kind" field when encoding. Version may not be empty - use the
+// APIVersionInternal constant if you have a type that does not have a formal version.
+func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) {
+	s.addObservedVersion(gv)
+	for _, obj := range types {
+		t := reflect.TypeOf(obj)
+		if t.Kind() != reflect.Ptr {
+			panic("All types must be pointers to structs.")
+		}
+		t = t.Elem()
+		s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj)
+	}
+}
+
+// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should
+// be encoded as. Useful for testing when you don't want to make multiple packages to define
+// your structs. Version may not be empty - use the APIVersionInternal constant if you have a
+// type that does not have a formal version.
+func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
+	s.addObservedVersion(gvk.GroupVersion())
+	t := reflect.TypeOf(obj)
+	if len(gvk.Version) == 0 {
+		panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t))
+	}
+	if t.Kind() != reflect.Ptr {
+		panic("All types must be pointers to structs.")
+	}
+	t = t.Elem()
+	if t.Kind() != reflect.Struct {
+		panic("All types must be pointers to structs.")
+	}
+
+	if oldT, found := s.gvkToType[gvk]; found && oldT != t {
+		panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v in scheme %q", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name(), s.schemeName))
+	}
+
+	s.gvkToType[gvk] = t
+
+	for _, existingGvk := range s.typeToGVK[t] {
+		if existingGvk == gvk {
+			return
+		}
+	}
+	s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
+
+	// if the type implements DeepCopyInto(<obj>), register a self-conversion
+	if m := reflect.ValueOf(obj).MethodByName("DeepCopyInto"); m.IsValid() && m.Type().NumIn() == 1 && m.Type().NumOut() == 0 && m.Type().In(0) == reflect.TypeOf(obj) {
+		if err := s.AddGeneratedConversionFunc(obj, obj, func(a, b interface{}, scope conversion.Scope) error {
+			// copy a to b
+			reflect.ValueOf(a).MethodByName("DeepCopyInto").Call([]reflect.Value{reflect.ValueOf(b)})
+			// clear TypeMeta to match legacy reflective conversion
+			b.(Object).GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+			return nil
+		}); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// KnownTypes returns the types known for the given version.
+func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type {
+	types := make(map[string]reflect.Type)
+	for gvk, t := range s.gvkToType {
+		if gv != gvk.GroupVersion() {
+			continue
+		}
+
+		types[gvk.Kind] = t
+	}
+	return types
+}
+
+// AllKnownTypes returns all the known types.
+func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type {
+	return s.gvkToType
+}
+
+// ObjectKinds returns all possible group,version,kind of the go object, true if the
+// object is considered unversioned, or an error if it's not a pointer or is unregistered.
+func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) {
+	// Unstructured objects are always considered to have their declared GVK
+	if _, ok := obj.(Unstructured); ok {
+		// we require that the GVK be populated in order to recognize the object
+		gvk := obj.GetObjectKind().GroupVersionKind()
+		if len(gvk.Kind) == 0 {
+			return nil, false, NewMissingKindErr("unstructured object has no kind")
+		}
+		if len(gvk.Version) == 0 {
+			return nil, false, NewMissingVersionErr("unstructured object has no version")
+		}
+		return []schema.GroupVersionKind{gvk}, false, nil
+	}
+
+	v, err := conversion.EnforcePtr(obj)
+	if err != nil {
+		return nil, false, err
+	}
+	t := v.Type()
+
+	gvks, ok := s.typeToGVK[t]
+	if !ok {
+		return nil, false, NewNotRegisteredErrForType(s.schemeName, t)
+	}
+	_, unversionedType := s.unversionedTypes[t]
+
+	return gvks, unversionedType, nil
+}
+
+// Recognizes returns true if the scheme is able to handle the provided group,version,kind
+// of an object.
+func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool {
+	_, exists := s.gvkToType[gvk]
+	return exists
+}
+
+func (s *Scheme) IsUnversioned(obj Object) (bool, bool) {
+	v, err := conversion.EnforcePtr(obj)
+	if err != nil {
+		return false, false
+	}
+	t := v.Type()
+
+	if _, ok := s.typeToGVK[t]; !ok {
+		return false, false
+	}
+	_, ok := s.unversionedTypes[t]
+	return ok, true
+}
+
+// New returns a new API object of the given version and name, or an error if it hasn't
+// been registered. The version and kind fields must be specified.
+func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) {
+	if t, exists := s.gvkToType[kind]; exists {
+		return reflect.New(t).Interface().(Object), nil
+	}
+
+	if t, exists := s.unversionedKinds[kind.Kind]; exists {
+		return reflect.New(t).Interface().(Object), nil
+	}
+	return nil, NewNotRegisteredErrForKind(s.schemeName, kind)
+}
+
+// Log sets a logger on the scheme. For test purposes only
+func (s *Scheme) Log(l conversion.DebugLogger) {
+	s.converter.Debug = l
+}
+
+// AddIgnoredConversionType identifies a pair of types that should be skipped by
+// conversion (because the data inside them is explicitly dropped during
+// conversion).
+func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error {
+	return s.converter.RegisterIgnoredConversion(from, to)
+}
+
+// AddConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (s *Scheme) AddConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error {
+	return s.converter.RegisterUntypedConversionFunc(a, b, fn)
+}
+
+// AddGeneratedConversionFunc registers a function that converts between a and b by passing objects of those
+// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
+// any other guarantee.
+func (s *Scheme) AddGeneratedConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error {
+	return s.converter.RegisterGeneratedUntypedConversionFunc(a, b, fn)
+}
+
+// AddFieldLabelConversionFunc adds a conversion function to convert field selectors
+// of the given kind from the given version to internal version representation.
+func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conversionFunc FieldLabelConversionFunc) error {
+	s.fieldLabelConversionFuncs[gvk] = conversionFunc
+	return nil
+}
+
+// RegisterInputDefaults sets the provided field mapping function and field matching
+// as the defaults for the provided input type.  The fn may be nil, in which case no
+// mapping will happen by default. Use this method to register a mechanism for handling
+// a specific input type in conversion, such as a map[string]string to structs.
+func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error {
+	return s.converter.RegisterInputDefaults(in, fn, defaultFlags)
+}
+
+// AddTypeDefaultingFunc registers a function that is passed a pointer to an
+// object and can default fields on the object. These functions will be invoked
+// when Default() is called. The function will never be called unless the
+// defaulted object matches srcType. If this function is invoked twice with the
+// same srcType, the fn passed to the later call will be used instead.
+func (s *Scheme) AddTypeDefaultingFunc(srcType Object, fn func(interface{})) {
+	s.defaulterFuncs[reflect.TypeOf(srcType)] = fn
+}
+
+// Default sets defaults on the provided Object.
+func (s *Scheme) Default(src Object) {
+	if fn, ok := s.defaulterFuncs[reflect.TypeOf(src)]; ok {
+		fn(src)
+	}
+}
+
+// Convert will attempt to convert in into out. Both must be pointers. For easy
+// testing of conversion functions. Returns an error if the conversion isn't
+// possible. You can call this with types that haven't been registered (for example,
+// to test conversion of types that are nested within registered types). The
+// context interface is passed to the convertor. Convert also supports Unstructured
+// types and will convert them intelligently.
+func (s *Scheme) Convert(in, out interface{}, context interface{}) error {
+	unstructuredIn, okIn := in.(Unstructured)
+	unstructuredOut, okOut := out.(Unstructured)
+	switch {
+	case okIn && okOut:
+		// converting unstructured input to an unstructured output is a straight copy - unstructured
+		// is a "smart holder" and the contents are passed by reference between the two objects
+		unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent())
+		return nil
+
+	case okOut:
+		// if the output is an unstructured object, use the standard Go type to unstructured
+		// conversion. The object must not be internal.
+		obj, ok := in.(Object)
+		if !ok {
+			return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in)
+		}
+		gvks, unversioned, err := s.ObjectKinds(obj)
+		if err != nil {
+			return err
+		}
+		gvk := gvks[0]
+
+		// if no conversion is necessary, convert immediately
+		if unversioned || gvk.Version != APIVersionInternal {
+			content, err := DefaultUnstructuredConverter.ToUnstructured(in)
+			if err != nil {
+				return err
+			}
+			unstructuredOut.SetUnstructuredContent(content)
+			unstructuredOut.GetObjectKind().SetGroupVersionKind(gvk)
+			return nil
+		}
+
+		// attempt to convert the object to an external version first.
+		target, ok := context.(GroupVersioner)
+		if !ok {
+			return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in)
+		}
+		// Convert is implicitly unsafe, so we don't need to perform a safe conversion
+		versioned, err := s.UnsafeConvertToVersion(obj, target)
+		if err != nil {
+			return err
+		}
+		content, err := DefaultUnstructuredConverter.ToUnstructured(versioned)
+		if err != nil {
+			return err
+		}
+		unstructuredOut.SetUnstructuredContent(content)
+		return nil
+
+	case okIn:
+		// converting an unstructured object to any type is modeled by first converting
+		// the input to a versioned type, then running standard conversions
+		typed, err := s.unstructuredToTyped(unstructuredIn)
+		if err != nil {
+			return err
+		}
+		in = typed
+	}
+
+	flags, meta := s.generateConvertMeta(in)
+	meta.Context = context
+	if flags == 0 {
+		flags = conversion.AllowDifferentFieldTypeNames
+	}
+	return s.converter.Convert(in, out, flags, meta)
+}
+
+// ConvertFieldLabel alters the given field label and value for a kind field selector from
+// versioned representation to an unversioned one or returns an error.
+func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) {
+	conversionFunc, ok := s.fieldLabelConversionFuncs[gvk]
+	if !ok {
+		return DefaultMetaV1FieldSelectorConversion(label, value)
+	}
+	return conversionFunc(label, value)
+}
+
+// ConvertToVersion attempts to convert an input object to its matching Kind in another
+// version within this scheme. Will return an error if the provided version does not
+// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also
+// return an error if the conversion does not result in a valid Object being
+// returned. Passes target down to the conversion methods as the Context on the scope.
+func (s *Scheme) ConvertToVersion(in Object, target GroupVersioner) (Object, error) {
+	return s.convertToVersion(true, in, target)
+}
+
+// UnsafeConvertToVersion will convert in to the provided target if such a conversion is possible,
+// but does not guarantee the output object does not share fields with the input object. It attempts to be as
+// efficient as possible when doing conversion.
+func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Object, error) {
+	return s.convertToVersion(false, in, target)
+}
+
+// convertToVersion handles conversion with an optional copy.
+func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) {
+	var t reflect.Type
+
+	if u, ok := in.(Unstructured); ok {
+		typed, err := s.unstructuredToTyped(u)
+		if err != nil {
+			return nil, err
+		}
+
+		in = typed
+		// unstructuredToTyped returns an Object, which must be a pointer to a struct.
+		t = reflect.TypeOf(in).Elem()
+
+	} else {
+		// determine the incoming kinds with as few allocations as possible.
+		t = reflect.TypeOf(in)
+		if t.Kind() != reflect.Ptr {
+			return nil, fmt.Errorf("only pointer types may be converted: %v", t)
+		}
+		t = t.Elem()
+		if t.Kind() != reflect.Struct {
+			return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t)
+		}
+	}
+
+	kinds, ok := s.typeToGVK[t]
+	if !ok || len(kinds) == 0 {
+		return nil, NewNotRegisteredErrForType(s.schemeName, t)
+	}
+
+	gvk, ok := target.KindForGroupVersionKinds(kinds)
+	if !ok {
+		// try to see if this type is listed as unversioned (for legacy support)
+		// TODO: when we move to server API versions, we should completely remove the unversioned concept
+		if unversionedKind, ok := s.unversionedTypes[t]; ok {
+			if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok {
+				return copyAndSetTargetKind(copy, in, gvk)
+			}
+			return copyAndSetTargetKind(copy, in, unversionedKind)
+		}
+		return nil, NewNotRegisteredErrForTarget(s.schemeName, t, target)
+	}
+
+	// target wants to use the existing type, set kind and return (no conversion necessary)
+	for _, kind := range kinds {
+		if gvk == kind {
+			return copyAndSetTargetKind(copy, in, gvk)
+		}
+	}
+
+	// type is unversioned, no conversion necessary
+	if unversionedKind, ok := s.unversionedTypes[t]; ok {
+		if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok {
+			return copyAndSetTargetKind(copy, in, gvk)
+		}
+		return copyAndSetTargetKind(copy, in, unversionedKind)
+	}
+
+	out, err := s.New(gvk)
+	if err != nil {
+		return nil, err
+	}
+
+	if copy {
+		in = in.DeepCopyObject()
+	}
+
+	flags, meta := s.generateConvertMeta(in)
+	meta.Context = target
+	if err := s.converter.Convert(in, out, flags, meta); err != nil {
+		return nil, err
+	}
+
+	setTargetKind(out, gvk)
+	return out, nil
+}
+
+// unstructuredToTyped attempts to transform an unstructured object to a typed
+// object if possible. It will return an error if conversion is not possible, or the versioned
+// Go form of the object. Note that this conversion will lose fields.
+func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) {
+	// the type must be something we recognize
+	gvks, _, err := s.ObjectKinds(in)
+	if err != nil {
+		return nil, err
+	}
+	typed, err := s.New(gvks[0])
+	if err != nil {
+		return nil, err
+	}
+	if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil {
+		return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err)
+	}
+	return typed, nil
+}
+
+// generateConvertMeta constructs the meta value we pass to Convert.
+func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) {
+	return s.converter.DefaultMeta(reflect.TypeOf(in))
+}
+
+// copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful.
+func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) {
+	if copy {
+		obj = obj.DeepCopyObject()
+	}
+	setTargetKind(obj, kind)
+	return obj, nil
+}
+
+// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version.
+func setTargetKind(obj Object, kind schema.GroupVersionKind) {
+	if kind.Version == APIVersionInternal {
+		// internal is a special case
+		// TODO: look at removing the need to special case this
+		obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+		return
+	}
+	obj.GetObjectKind().SetGroupVersionKind(kind)
+}
+
+// SetVersionPriority allows specifying a precise order of priority. All specified versions must be in the same group,
+// and the specified order overwrites any previously specified order for this group
+func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error {
+	groups := sets.String{}
+	order := []string{}
+	for _, version := range versions {
+		if len(version.Version) == 0 || version.Version == APIVersionInternal {
+			return fmt.Errorf("internal versions cannot be prioritized: %v", version)
+		}
+
+		groups.Insert(version.Group)
+		order = append(order, version.Version)
+	}
+	if len(groups) != 1 {
+		return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", "))
+	}
+
+	s.versionPriority[groups.List()[0]] = order
+	return nil
+}
+
+// PrioritizedVersionsForGroup returns versions for a single group in priority order
+func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion {
+	ret := []schema.GroupVersion{}
+	for _, version := range s.versionPriority[group] {
+		ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+	}
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion.Group != group {
+			continue
+		}
+		found := false
+		for _, existing := range ret {
+			if existing == observedVersion {
+				found = true
+				break
+			}
+		}
+		if !found {
+			ret = append(ret, observedVersion)
+		}
+	}
+
+	return ret
+}
+
+// PrioritizedVersionsAllGroups returns all known versions in their priority order.  Groups are random, but
+// versions for a single group are prioritized
+func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion {
+	ret := []schema.GroupVersion{}
+	for group, versions := range s.versionPriority {
+		for _, version := range versions {
+			ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+		}
+	}
+	for _, observedVersion := range s.observedVersions {
+		found := false
+		for _, existing := range ret {
+			if existing == observedVersion {
+				found = true
+				break
+			}
+		}
+		if !found {
+			ret = append(ret, observedVersion)
+		}
+	}
+	return ret
+}
+
+// PreferredVersionAllGroups returns the most preferred version for every group.
+// group ordering is random.
+func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion {
+	ret := []schema.GroupVersion{}
+	for group, versions := range s.versionPriority {
+		for _, version := range versions {
+			ret = append(ret, schema.GroupVersion{Group: group, Version: version})
+			break
+		}
+	}
+	for _, observedVersion := range s.observedVersions {
+		found := false
+		for _, existing := range ret {
+			if existing.Group == observedVersion.Group {
+				found = true
+				break
+			}
+		}
+		if !found {
+			ret = append(ret, observedVersion)
+		}
+	}
+
+	return ret
+}
+
+// IsGroupRegistered returns true if types for the group have been registered with the scheme
+func (s *Scheme) IsGroupRegistered(group string) bool {
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion.Group == group {
+			return true
+		}
+	}
+	return false
+}
+
+// IsVersionRegistered returns true if types for the version have been registered with the scheme
+func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool {
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion == version {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (s *Scheme) addObservedVersion(version schema.GroupVersion) {
+	if len(version.Version) == 0 || version.Version == APIVersionInternal {
+		return
+	}
+	for _, observedVersion := range s.observedVersions {
+		if observedVersion == version {
+			return
+		}
+	}
+
+	s.observedVersions = append(s.observedVersions, version)
+}
+
+func (s *Scheme) Name() string {
+	return s.schemeName
+}
+
+// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
+// call chains to NewReflector, so they'd be low entropy names for reflectors
+var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go
new file mode 100644
index 0000000..944db48
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+// SchemeBuilder collects functions that add things to a scheme. It's to allow
+// code to compile without explicitly referencing generated types. You should
+// declare one in each package that will have generated deep copy or conversion
+// functions.
+type SchemeBuilder []func(*Scheme) error
+
+// AddToScheme applies all the stored functions to the scheme. A non-nil error
+// indicates that one function failed and the attempt was abandoned.
+func (sb *SchemeBuilder) AddToScheme(s *Scheme) error {
+	for _, f := range *sb {
+		if err := f(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Register adds a scheme setup function to the list.
+func (sb *SchemeBuilder) Register(funcs ...func(*Scheme) error) {
+	for _, f := range funcs {
+		*sb = append(*sb, f)
+	}
+}
+
+// NewSchemeBuilder calls Register for you.
+func NewSchemeBuilder(funcs ...func(*Scheme) error) SchemeBuilder {
+	var sb SchemeBuilder
+	sb.Register(funcs...)
+	return sb
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
new file mode 100644
index 0000000..f21b0ef
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"mime"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+)
+
+// serializerExtensions are for serializers that are conditionally compiled in
+var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
+
+type serializerType struct {
+	AcceptContentTypes []string
+	ContentType        string
+	FileExtensions     []string
+	// EncodesAsText should be true if this content type can be represented safely in UTF-8
+	EncodesAsText bool
+
+	Serializer       runtime.Serializer
+	PrettySerializer runtime.Serializer
+
+	AcceptStreamContentTypes []string
+	StreamContentType        string
+
+	Framer           runtime.Framer
+	StreamSerializer runtime.Serializer
+}
+
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
+	jsonSerializer := json.NewSerializerWithOptions(
+		mf, scheme, scheme,
+		json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
+	)
+	jsonSerializerType := serializerType{
+		AcceptContentTypes: []string{runtime.ContentTypeJSON},
+		ContentType:        runtime.ContentTypeJSON,
+		FileExtensions:     []string{"json"},
+		EncodesAsText:      true,
+		Serializer:         jsonSerializer,
+
+		Framer:           json.Framer,
+		StreamSerializer: jsonSerializer,
+	}
+	if options.Pretty {
+		jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
+			mf, scheme, scheme,
+			json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict},
+		)
+	}
+
+	yamlSerializer := json.NewSerializerWithOptions(
+		mf, scheme, scheme,
+		json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
+	)
+	protoSerializer := protobuf.NewSerializer(scheme, scheme)
+	protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
+
+	serializers := []serializerType{
+		jsonSerializerType,
+		{
+			AcceptContentTypes: []string{runtime.ContentTypeYAML},
+			ContentType:        runtime.ContentTypeYAML,
+			FileExtensions:     []string{"yaml"},
+			EncodesAsText:      true,
+			Serializer:         yamlSerializer,
+		},
+		{
+			AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
+			ContentType:        runtime.ContentTypeProtobuf,
+			FileExtensions:     []string{"pb"},
+			Serializer:         protoSerializer,
+
+			Framer:           protobuf.LengthDelimitedFramer,
+			StreamSerializer: protoRawSerializer,
+		},
+	}
+
+	for _, fn := range serializerExtensions {
+		if serializer, ok := fn(scheme); ok {
+			serializers = append(serializers, serializer)
+		}
+	}
+	return serializers
+}
+
+// CodecFactory provides methods for retrieving codecs and serializers for specific
+// versions and content types.
+type CodecFactory struct {
+	scheme      *runtime.Scheme
+	serializers []serializerType
+	universal   runtime.Decoder
+	accepts     []runtime.SerializerInfo
+
+	legacySerializer runtime.Serializer
+}
+
+// CodecFactoryOptions holds the options for configuring CodecFactory behavior
+type CodecFactoryOptions struct {
+	// Strict configures all serializers in strict mode
+	Strict bool
+	// Pretty includes a pretty serializer along with the non-pretty one
+	Pretty bool
+}
+
+// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
+// Functions implementing this type can be passed to the NewCodecFactory() constructor.
+type CodecFactoryOptionsMutator func(*CodecFactoryOptions)
+
+// EnablePretty enables including a pretty serializer along with the non-pretty one
+func EnablePretty(options *CodecFactoryOptions) {
+	options.Pretty = true
+}
+
+// DisablePretty disables including a pretty serializer along with the non-pretty one
+func DisablePretty(options *CodecFactoryOptions) {
+	options.Pretty = false
+}
+
+// EnableStrict enables configuring all serializers in strict mode
+func EnableStrict(options *CodecFactoryOptions) {
+	options.Strict = true
+}
+
+// DisableStrict disables configuring all serializers in strict mode
+func DisableStrict(options *CodecFactoryOptions) {
+	options.Strict = false
+}
+
+// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
+// and conversion wrappers to define preferred internal and external versions. In the future,
+// as the internal version is used less, callers may instead use a defaulting serializer and
+// only convert objects which are shared internally (Status, common API machinery).
+//
+// Mutators can be passed to change the CodecFactoryOptions before construction of the factory.
+// It is recommended to explicitly pass mutators instead of relying on defaults.
+// By default, Pretty is enabled -- this is conformant with previously supported behavior.
+//
+// TODO: allow other codecs to be compiled in?
+// TODO: accept a scheme interface
+func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory {
+	options := CodecFactoryOptions{Pretty: true}
+	for _, fn := range mutators {
+		fn(&options)
+	}
+
+	serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options)
+	return newCodecFactory(scheme, serializers)
+}
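+
+// Illustrative usage (a minimal sketch): mutators are plain functions, so a caller can tune
+// the factory at construction time, e.g. enabling strict decoding and skipping the pretty
+// serializer:
+//
+//	scheme := runtime.NewScheme()
+//	factory := NewCodecFactory(scheme, EnableStrict, DisablePretty)
+//	_ = factory.UniversalDeserializer()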
+
+// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
+func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+	decoders := make([]runtime.Decoder, 0, len(serializers))
+	var accepts []runtime.SerializerInfo
+	alreadyAccepted := make(map[string]struct{})
+
+	var legacySerializer runtime.Serializer
+	for _, d := range serializers {
+		decoders = append(decoders, d.Serializer)
+		for _, mediaType := range d.AcceptContentTypes {
+			if _, ok := alreadyAccepted[mediaType]; ok {
+				continue
+			}
+			alreadyAccepted[mediaType] = struct{}{}
+			info := runtime.SerializerInfo{
+				MediaType:        d.ContentType,
+				EncodesAsText:    d.EncodesAsText,
+				Serializer:       d.Serializer,
+				PrettySerializer: d.PrettySerializer,
+			}
+
+			mediaType, _, err := mime.ParseMediaType(info.MediaType)
+			if err != nil {
+				panic(err)
+			}
+			parts := strings.SplitN(mediaType, "/", 2)
+			info.MediaTypeType = parts[0]
+			info.MediaTypeSubType = parts[1]
+
+			if d.StreamSerializer != nil {
+				info.StreamSerializer = &runtime.StreamSerializerInfo{
+					Serializer:    d.StreamSerializer,
+					EncodesAsText: d.EncodesAsText,
+					Framer:        d.Framer,
+				}
+			}
+			accepts = append(accepts, info)
+			if mediaType == runtime.ContentTypeJSON {
+				legacySerializer = d.Serializer
+			}
+		}
+	}
+	if legacySerializer == nil {
+		legacySerializer = serializers[0].Serializer
+	}
+
+	return CodecFactory{
+		scheme:      scheme,
+		serializers: serializers,
+		universal:   recognizer.NewDecoder(decoders...),
+
+		accepts: accepts,
+
+		legacySerializer: legacySerializer,
+	}
+}
+
+// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the
+// caller requests it.
+func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer {
+	return WithoutConversionCodecFactory{f}
+}
+
+// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
+func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
+	return f.accepts
+}
+
+// LegacyCodec encodes output to the given API versions, and decodes output into the internal form from
+// any recognized source. The returned codec will always encode output to JSON. If a type is not
+// found in the list of versions an error will be returned.
+//
+// This method is deprecated - clients and servers should negotiate a serializer by mime-type and
+// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
+//
+// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
+//   All other callers will be forced to request a Codec directly.
+func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
+	return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
+}
+
+// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies
+// runtime.Object. It does not perform conversion. It does not perform defaulting.
+func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
+	return f.universal
+}
+
+// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used
+// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes
+// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate
+// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified,
+// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs
+// defaulting.
+//
+// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
+// TODO: only accept a group versioner
+func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder {
+	var versioner runtime.GroupVersioner
+	if len(versions) == 0 {
+		versioner = runtime.InternalGroupVersioner
+	} else {
+		versioner = schema.GroupVersions(versions)
+	}
+	return f.CodecForVersions(nil, f.universal, nil, versioner)
+}
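+
+// Illustrative usage (a minimal sketch): the difference between the two "universal" helpers is
+// conversion and defaulting. UniversalDeserializer only decodes, while UniversalDecoder also
+// converts to the requested (or internal) version:
+//
+//	factory := NewCodecFactory(runtime.NewScheme())
+//	raw := factory.UniversalDeserializer()                        // decode only, no conversion
+//	dec := factory.UniversalDecoder(schema.GroupVersion{Group: "", Version: "v1"})
+//	_, _ = raw, dec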
+
+// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list,
+// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not
+// converted. If encode or decode are nil, no conversion is performed.
+func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec {
+	// TODO: these are for backcompat, remove them in the future
+	if encode == nil {
+		encode = runtime.DisabledGroupVersioner
+	}
+	if decode == nil {
+		decode = runtime.InternalGroupVersioner
+	}
+	return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode)
+}
+
+// DecoderToVersion returns a decoder that targets the provided group version.
+func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
+	return f.CodecForVersions(nil, decoder, nil, gv)
+}
+
+// EncoderForVersion returns an encoder that targets the provided group version.
+func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
+	return f.CodecForVersions(encoder, nil, gv, nil)
+}
+
+// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion.
+// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future
+// will be unnecessary when we change the signature of NegotiatedSerializer.
+type WithoutConversionCodecFactory struct {
+	CodecFactory
+}
+
+// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object
+// when serialized.
+func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
+	return runtime.WithVersionEncoder{
+		Version:     version,
+		Encoder:     serializer,
+		ObjectTyper: f.CodecFactory.scheme,
+	}
+}
+
+// DecoderToVersion returns a decoder that does not do conversion.
+func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+	return runtime.WithoutVersionDecoder{
+		Decoder: serializer,
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
new file mode 100644
index 0000000..e081d7f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -0,0 +1,388 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"encoding/json"
+	"io"
+	"strconv"
+	"unsafe"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/modern-go/reflect2"
+	"sigs.k8s.io/yaml"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/util/framer"
+	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/klog/v2"
+)
+
+// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
+// is not nil, the object has the group, version, and kind fields set.
+// Deprecated: use NewSerializerWithOptions instead.
+func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
+	return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
+}
+
+// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
+// matches JSON, and will error if constructs are used that do not serialize to JSON.
+// Deprecated: use NewSerializerWithOptions instead.
+func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+	return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
+}
+
+// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
+// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer
+// and are immutable.
+func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
+	return &Serializer{
+		meta:       meta,
+		creater:    creater,
+		typer:      typer,
+		options:    options,
+		identifier: identifier(options),
+	}
+}
+
+// identifier computes Identifier of Encoder based on the given options.
+func identifier(options SerializerOptions) runtime.Identifier {
+	result := map[string]string{
+		"name":   "json",
+		"yaml":   strconv.FormatBool(options.Yaml),
+		"pretty": strconv.FormatBool(options.Pretty),
+	}
+	identifier, err := json.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err)
+	}
+	return runtime.Identifier(identifier)
+}
+
+// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
+// For example:
+// (1) To configure a JSON serializer, set `Yaml` to `false`.
+// (2) To configure a YAML serializer, set `Yaml` to `true`.
+// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`.
+type SerializerOptions struct {
+	// Yaml: configures the Serializer to work with JSON(false) or YAML(true).
+	// When `Yaml` is enabled, this serializer only supports the subset of YAML that
+	// matches JSON, and will error if constructs are used that do not serialize to JSON.
+	Yaml bool
+
+	// Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output.
+	// This option is silently ignored when `Yaml` is `true`.
+	Pretty bool
+
+	// Strict: configures the Serializer to return strictDecodingErrors when duplicate fields are present while decoding JSON or YAML.
+	// Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
+	Strict bool
+}
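+
+// Illustrative usage (a minimal sketch; exampleScheme is a hypothetical *runtime.Scheme used as
+// both creater and typer): a strict YAML serializer combines the Yaml and Strict options:
+//
+//	yamlStrict := NewSerializerWithOptions(
+//		DefaultMetaFactory, exampleScheme, exampleScheme,
+//		SerializerOptions{Yaml: true, Pretty: false, Strict: true},
+//	)
+//	_ = yamlStrict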
+
+type Serializer struct {
+	meta    MetaFactory
+	options SerializerOptions
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+
+	identifier runtime.Identifier
+}
+
+// Serializer implements runtime.Serializer and recognizer.RecognizingDecoder.
+var _ runtime.Serializer = &Serializer{}
+var _ recognizer.RecognizingDecoder = &Serializer{}
+
+type customNumberExtension struct {
+	jsoniter.DummyExtension
+}
+
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+	if typ.String() == "interface {}" {
+		return customNumberDecoder{}
+	}
+	return nil
+}
+
+type customNumberDecoder struct {
+}
+
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+	switch iter.WhatIsNext() {
+	case jsoniter.NumberValue:
+		var number jsoniter.Number
+		iter.ReadVal(&number)
+		i64, err := strconv.ParseInt(string(number), 10, 64)
+		if err == nil {
+			*(*interface{})(ptr) = i64
+			return
+		}
+		f64, err := strconv.ParseFloat(string(number), 64)
+		if err == nil {
+			*(*interface{})(ptr) = f64
+			return
+		}
+		iter.ReportError("DecodeNumber", err.Error())
+	default:
+		*(*interface{})(ptr) = iter.Read()
+	}
+}
+
+// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive when unmarshalling, and otherwise compatible with
+// the encoding/json standard library.
+func CaseSensitiveJsonIterator() jsoniter.API {
+	config := jsoniter.Config{
+		EscapeHTML:             true,
+		SortMapKeys:            true,
+		ValidateJsonRawMessage: true,
+		CaseSensitive:          true,
+	}.Froze()
+	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
+	config.RegisterExtension(&customNumberExtension{})
+	return config
+}
+
+// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
+// the encoding/json standard library.
+func StrictCaseSensitiveJsonIterator() jsoniter.API {
+	config := jsoniter.Config{
+		EscapeHTML:             true,
+		SortMapKeys:            true,
+		ValidateJsonRawMessage: true,
+		CaseSensitive:          true,
+		DisallowUnknownFields:  true,
+	}.Froze()
+	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
+	config.RegisterExtension(&customNumberExtension{})
+	return config
+}
+
+// Private copies of jsoniter to try to shield against possible mutations
+// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
+// in some other library will mess with every usage of the jsoniter library in the whole program.
+// See https://github.com/json-iterator/go/issues/265
+var caseSensitiveJsonIterator = CaseSensitiveJsonIterator()
+var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator()
+
+// gvkWithDefaults returns the group, version, and kind, filling any empty fields from the provided default.
+func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
+	if len(actual.Kind) == 0 {
+		actual.Kind = defaultGVK.Kind
+	}
+	if len(actual.Version) == 0 && len(actual.Group) == 0 {
+		actual.Group = defaultGVK.Group
+		actual.Version = defaultGVK.Version
+	}
+	if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
+		actual.Version = defaultGVK.Version
+	}
+	return actual
+}
+
+// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
+// load that data into an object matching the desired schema kind or the provided into.
+// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed.
+// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling.
+// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk.
+// If into is nil or data's gvk differs from into's gvk, a new object will be generated with ObjectCreater.New(gvk).
+// On success or most errors, the method will return the calculated schema kind.
+// The gvk is calculated with the priority: originalData > default gvk > into.
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	data := originalData
+	if s.options.Yaml {
+		altered, err := yaml.YAMLToJSON(data)
+		if err != nil {
+			return nil, nil, err
+		}
+		data = altered
+	}
+
+	actual, err := s.meta.Interpret(data)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if gvk != nil {
+		*actual = gvkWithDefaults(*actual, *gvk)
+	}
+
+	if unk, ok := into.(*runtime.Unknown); ok && unk != nil {
+		unk.Raw = originalData
+		unk.ContentType = runtime.ContentTypeJSON
+		unk.GetObjectKind().SetGroupVersionKind(*actual)
+		return unk, actual, nil
+	}
+
+	if into != nil {
+		_, isUnstructured := into.(runtime.Unstructured)
+		types, _, err := s.typer.ObjectKinds(into)
+		switch {
+		case runtime.IsNotRegisteredError(err), isUnstructured:
+			if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil {
+				return nil, actual, err
+			}
+			return into, actual, nil
+		case err != nil:
+			return nil, actual, err
+		default:
+			*actual = gvkWithDefaults(*actual, types[0])
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr(string(originalData))
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr(string(originalData))
+	}
+
+	// use the target if necessary
+	obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil {
+		return nil, actual, err
+	}
+
+	// If the deserializer is non-strict, return successfully here.
+	if !s.options.Strict {
+		return obj, actual, nil
+	}
+
+	// In strict mode, pass the data through the YAMLToJSONStrict converter.
+	// This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
+	// the output would equal the input, unless there is a parsing error such as duplicate fields.
+	// As we know this was successful in the non-strict case, the only error that may be returned here
+	// is due to the newly-added strictness; hence we know we can return the typed strictDecodingError:
+	// the actual error is that the object contains duplicate fields.
+	altered, err := yaml.YAMLToJSONStrict(originalData)
+	if err != nil {
+		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
+	}
+	// As performance is not an issue for the strict deserializer for now (the unmarshal has to be done
+	// twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
+	// fields, and unmarshal it into a copy of the already-populated obj. Any error that occurs here is
+	// due to a field in the data having no matching field in the object; hence we can return a typed
+	// strictDecodingError: the actual error is that the object contains an unknown field.
+	strictObj := obj.DeepCopyObject()
+	if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil {
+		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
+	}
+	// Always return the same object as the non-strict serializer to avoid any deviations.
+	return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
+	if s.options.Yaml {
+		json, err := caseSensitiveJsonIterator.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		data, err := yaml.JSONToYAML(json)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+
+	if s.options.Pretty {
+		data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", "  ")
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+	encoder := json.NewEncoder(w)
+	return encoder.Encode(obj)
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s *Serializer) Identifier() runtime.Identifier {
+	return s.identifier
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
+	if s.options.Yaml {
+		// we could potentially look for '---'
+		return false, true, nil
+	}
+	_, _, ok = utilyaml.GuessJSONStream(peek, 2048)
+	return ok, false, nil
+}
+
+// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
+var Framer = jsonFramer{}
+
+type jsonFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {
+	// we can write JSON objects directly to the writer, because they are self-framing
+	return w
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	// we need to extract the JSON chunks of data to pass to Decode()
+	return framer.NewJSONFramedReader(r)
+}
+
+// YAMLFramer is the default YAML framing behavior, with the YAML document separator ("---") delimiting individual documents.
+var YAMLFramer = yamlFramer{}
+
+type yamlFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer {
+	return yamlFrameWriter{w}
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	// extract the YAML document chunks directly
+	return utilyaml.NewDocumentDecoder(r)
+}
+
+type yamlFrameWriter struct {
+	w io.Writer
+}
+
+// Write separates each document with the YAML document separator (`---` followed by line
+// break). Writers must write well formed YAML documents (include a final line break).
+func (w yamlFrameWriter) Write(data []byte) (n int, err error) {
+	if _, err := w.w.Write([]byte("---\n")); err != nil {
+		return 0, err
+	}
+	return w.w.Write(data)
+}
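+
+// Illustrative usage (a minimal sketch): framers wrap a stream so multiple documents can be
+// written and read back; the YAML framer prefixes each frame with the "---" document separator:
+//
+//	var buf bytes.Buffer
+//	w := YAMLFramer.NewFrameWriter(&buf)
+//	_, _ = w.Write([]byte("a: 1\n"))
+//	_, _ = w.Write([]byte("b: 2\n"))
+//	// buf now contains "---\na: 1\n---\nb: 2\n"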
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
new file mode 100644
index 0000000..df3f5f9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// MetaFactory is used to store and retrieve the version and kind
+// information for JSON objects in a serializer.
+type MetaFactory interface {
+	// Interpret should return the version and kind of the wire-format of
+	// the object.
+	Interpret(data []byte) (*schema.GroupVersionKind, error)
+}
+
+// DefaultMetaFactory is a default factory for versioning objects in JSON. The object
+// in memory and in the default JSON serialization will use the "kind" and "apiVersion"
+// fields.
+var DefaultMetaFactory = SimpleMetaFactory{}
+
+// SimpleMetaFactory provides default methods for retrieving the type and version of objects
+// that are identified with "apiVersion" and "kind" fields in their JSON
+// serialization. It may be parameterized with the names of the fields in memory, or an
+// optional list of base structs to search for those fields in memory.
+type SimpleMetaFactory struct {
+}
+
+// Interpret will return the APIVersion and Kind of the JSON wire-format
+// encoding of an object, or an error.
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+	findKind := struct {
+		// +optional
+		APIVersion string `json:"apiVersion,omitempty"`
+		// +optional
+		Kind string `json:"kind,omitempty"`
+	}{}
+	if err := json.Unmarshal(data, &findKind); err != nil {
+		return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err)
+	}
+	gv, err := schema.ParseGroupVersion(findKind.APIVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil
+}
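+
+// Illustrative usage (a minimal sketch): Interpret only inspects the two type fields, so it can
+// be applied to any JSON document:
+//
+//	gvk, err := DefaultMetaFactory.Interpret([]byte(`{"apiVersion":"apps/v1","kind":"Deployment"}`))
+//	// gvk is &schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, err is nil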
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
new file mode 100644
index 0000000..a42b4a4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// TODO: We should split negotiated serializers that we can change versions on from those we can change
+// serialization formats on
+type negotiatedSerializerWrapper struct {
+	info runtime.SerializerInfo
+}
+
+func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer {
+	return &negotiatedSerializerWrapper{info}
+}
+
+func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo {
+	return []runtime.SerializerInfo{n.info}
+}
+
+func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder {
+	return e
+}
+
+func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder {
+	return d
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
new file mode 100644
index 0000000..72d0ac7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package protobuf provides a Kubernetes serializer for the protobuf format.
+package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
new file mode 100644
index 0000000..f606b7d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -0,0 +1,472 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package protobuf
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+
+	"github.com/gogo/protobuf/proto"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/util/framer"
+)
+
+var (
+	// protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All
+	// proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth
+	// byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that
+	// the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2).
+	//
+	// See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message.
+	//
+	// This encoding scheme is experimental, and is subject to change at any time.
+	protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
+)
+
+type errNotMarshalable struct {
+	t reflect.Type
+}
+
+func (e errNotMarshalable) Error() string {
+	return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
+}
+
+func (e errNotMarshalable) Status() metav1.Status {
+	return metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusNotAcceptable,
+		Reason:  metav1.StatusReason("NotAcceptable"),
+		Message: e.Error(),
+	}
+}
+
+func IsNotMarshalable(err error) bool {
+	_, ok := err.(errNotMarshalable)
+	return err != nil && ok
+}
+
+// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
+// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
+// as-is (any type info passed with the object will be used).
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+	return &Serializer{
+		prefix:  protoEncodingPrefix,
+		creater: creater,
+		typer:   typer,
+	}
+}
+
+type Serializer struct {
+	prefix  []byte
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+}
+
+var _ runtime.Serializer = &Serializer{}
+var _ recognizer.RecognizingDecoder = &Serializer{}
+
+const serializerIdentifier runtime.Identifier = "protobuf"
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	prefixLen := len(s.prefix)
+	switch {
+	case len(originalData) == 0:
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]):
+		return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix)
+	case len(originalData) == prefixLen:
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty body")
+	}
+
+	data := originalData[prefixLen:]
+	unk := runtime.Unknown{}
+	if err := unk.Unmarshal(data); err != nil {
+		return nil, nil, err
+	}
+
+	actual := unk.GroupVersionKind()
+	copyKindDefaults(&actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		*intoUnknown = unk
+		if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
+			intoUnknown.ContentType = runtime.ContentTypeProtobuf
+		}
+		return intoUnknown, &actual, nil
+	}
+
+	if into != nil {
+		types, _, err := s.typer.ObjectKinds(into)
+		switch {
+		case runtime.IsNotRegisteredError(err):
+			pb, ok := into.(proto.Message)
+			if !ok {
+				return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
+			}
+			if err := proto.Unmarshal(unk.Raw, pb); err != nil {
+				return nil, &actual, err
+			}
+			return into, &actual, nil
+		case err != nil:
+			return nil, &actual, err
+		default:
+			copyKindDefaults(&actual, &types[0])
+			// if the result of defaulting did not set a version or group, ensure that at least group is set
+			// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+			// of into is set if there is no better information from the caller or object.
+			if len(actual.Version) == 0 && len(actual.Group) == 0 {
+				actual.Group = types[0].Group
+			}
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta))
+	}
+	if len(actual.Version) == 0 {
+		return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta))
+	}
+
+	return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
+	prefixSize := uint64(len(s.prefix))
+
+	var unk runtime.Unknown
+	switch t := obj.(type) {
+	case *runtime.Unknown:
+		estimatedSize := prefixSize + uint64(t.Size())
+		data := make([]byte, estimatedSize)
+		i, err := t.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+		copy(data, s.prefix)
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+	default:
+		kind := obj.GetObjectKind().GroupVersionKind()
+		unk = runtime.Unknown{
+			TypeMeta: runtime.TypeMeta{
+				Kind:       kind.Kind,
+				APIVersion: kind.GroupVersion().String(),
+			},
+		}
+	}
+
+	switch t := obj.(type) {
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalToSizedBuffer methods
+		encodedSize := uint64(t.Size())
+		estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
+		data := make([]byte, estimatedSize)
+
+		i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		unk.Raw = data
+
+		estimatedSize := prefixSize + uint64(unk.Size())
+		data = make([]byte, estimatedSize)
+
+		i, err := unk.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	default:
+		// TODO: marshal with a different content type and serializer (JSON for third party objects)
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s *Serializer) Identifier() runtime.Identifier {
+	return serializerIdentifier
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
+	prefix := make([]byte, 4)
+	n, err := peek.Read(prefix)
+	if err != nil {
+		if err == io.EOF {
+			return false, false, nil
+		}
+		return false, false, err
+	}
+	if n != 4 {
+		return false, false, nil
+	}
+	return bytes.Equal(s.prefix, prefix), false, nil
+}
+
+// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
+func copyKindDefaults(dst, src *schema.GroupVersionKind) {
+	if src == nil {
+		return
+	}
+	// apply kind and version defaulting from provided default
+	if len(dst.Kind) == 0 {
+		dst.Kind = src.Kind
+	}
+	if len(dst.Version) == 0 && len(src.Version) > 0 {
+		dst.Group = src.Group
+		dst.Version = src.Version
+	}
+}
+
+// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
+// byte buffers by pre-calculating the size of the final buffer needed.
+type bufferedMarshaller interface {
+	proto.Sizer
+	runtime.ProtobufMarshaller
+}
+
+// bufferedReverseMarshaller is like bufferedMarshaller, but can marshal backwards, which is more
+// efficient since it doesn't call Size() as frequently.
+type bufferedReverseMarshaller interface {
+	proto.Sizer
+	runtime.ProtobufReverseMarshaller
+}
+
+// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
+// object with a nil RawJSON struct and the expected size of the provided buffer. The
+// returned size will not be correct if RawJSON is set on unk.
+func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
+	size := uint64(unk.Size())
+	// protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here),
+	// and the size of the array.
+	size += 1 + 8 + byteSize
+	return size
+}
+
+// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
+// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
+func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer {
+	return &RawSerializer{
+		creater: creater,
+		typer:   typer,
+	}
+}
+
+// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
+// type).
+type RawSerializer struct {
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+}
+
+var _ runtime.Serializer = &RawSerializer{}
+
+const rawSerializerIdentifier runtime.Identifier = "raw-protobuf"
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	if into == nil {
+		return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
+	}
+
+	if len(originalData) == 0 {
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	}
+	data := originalData
+
+	actual := &schema.GroupVersionKind{}
+	copyKindDefaults(actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		intoUnknown.Raw = data
+		intoUnknown.ContentEncoding = ""
+		intoUnknown.ContentType = runtime.ContentTypeProtobuf
+		intoUnknown.SetGroupVersionKind(*actual)
+		return intoUnknown, actual, nil
+	}
+
+	types, _, err := s.typer.ObjectKinds(into)
+	switch {
+	case runtime.IsNotRegisteredError(err):
+		pb, ok := into.(proto.Message)
+		if !ok {
+			return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
+		}
+		if err := proto.Unmarshal(data, pb); err != nil {
+			return nil, actual, err
+		}
+		return into, actual, nil
+	case err != nil:
+		return nil, actual, err
+	default:
+		copyKindDefaults(actual, &types[0])
+		// if the result of defaulting did not set a version or group, ensure that at least group is set
+		// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+		// of into is set if there is no better information from the caller or object.
+		if len(actual.Version) == 0 && len(actual.Group) == 0 {
+			actual.Group = types[0].Group
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr("<protobuf encoded body - must provide default type>")
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr("<protobuf encoded body - must provide default type>")
+	}
+
+	return unmarshalToObject(s.typer, s.creater, actual, into, data)
+}
+
+// unmarshalToObject is the common code between decode in the raw and normal serializer.
+func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) {
+	// use the target if necessary
+	obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	pb, ok := obj.(proto.Message)
+	if !ok {
+		return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
+	}
+	if err := proto.Unmarshal(data, pb); err != nil {
+		return nil, actual, err
+	}
+	if actual != nil {
+		obj.GetObjectKind().SetGroupVersionKind(*actual)
+	}
+	return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
+	switch t := obj.(type) {
+	case bufferedReverseMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalToSizedBuffer methods
+		encodedSize := uint64(t.Size())
+		data := make([]byte, encodedSize)
+
+		n, err := t.MarshalToSizedBuffer(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data[:n])
+		return err
+
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalTo methods
+		encodedSize := uint64(t.Size())
+		data := make([]byte, encodedSize)
+
+		n, err := t.MarshalTo(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data[:n])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+
+	default:
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s *RawSerializer) Identifier() runtime.Identifier {
+	return rawSerializerIdentifier
+}
+
+var LengthDelimitedFramer = lengthDelimitedFramer{}
+
+type lengthDelimitedFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer {
+	return framer.NewLengthDelimitedFrameWriter(w)
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	return framer.NewLengthDelimitedFrameReader(r)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
new file mode 100644
index 0000000..38497ab
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package recognizer
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type RecognizingDecoder interface {
+	runtime.Decoder
+	// RecognizesData should return true if the input provided in the provided reader
+	// belongs to this decoder, or an error if the data could not be read or is ambiguous.
+	// Unknown is true if the data could not be determined to match the decoder type.
+	// Decoders should assume that they can read as much of peek as they need (as the caller
+	// provides) and may return unknown if the data provided is not sufficient to make
+	// a determination. When peek returns EOF that may mean the end of the input or the
+	// end of buffered input - recognizers should return the best guess at that time.
+	RecognizesData(peek io.Reader) (ok, unknown bool, err error)
+}
+
+// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
+// by:
+//
+// 1. Decoders that implement RecognizingDecoder and recognize the data.
+// 2. All other decoders, and any recognizing decoder that reported the data as unknown.
+//
+// The order passed to the constructor is preserved within those priorities.
+func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder {
+	return &decoder{
+		decoders: decoders,
+	}
+}
+
+type decoder struct {
+	decoders []runtime.Decoder
+}
+
+var _ RecognizingDecoder = &decoder{}
+
+func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) {
+	var (
+		lastErr    error
+		anyUnknown bool
+	)
+	data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024)
+	for _, r := range d.decoders {
+		switch t := r.(type) {
+		case RecognizingDecoder:
+			ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data))
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			anyUnknown = anyUnknown || unknown
+			if !ok {
+				continue
+			}
+			return true, false, nil
+		}
+	}
+	return false, anyUnknown, lastErr
+}
+
+func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	var (
+		lastErr error
+		skipped []runtime.Decoder
+	)
+
+	// try recognizers, record any decoders we need to give a chance later
+	for _, r := range d.decoders {
+		switch t := r.(type) {
+		case RecognizingDecoder:
+			buf := bytes.NewBuffer(data)
+			ok, unknown, err := t.RecognizesData(buf)
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			if unknown {
+				skipped = append(skipped, t)
+				continue
+			}
+			if !ok {
+				continue
+			}
+			return r.Decode(data, gvk, into)
+		default:
+			skipped = append(skipped, t)
+		}
+	}
+
+	// try recognizers that returned unknown or didn't recognize their data
+	for _, r := range skipped {
+		out, actual, err := r.Decode(data, gvk, into)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		return out, actual, nil
+	}
+
+	if lastErr == nil {
+		lastErr = fmt.Errorf("no serialization format matched the provided data")
+	}
+	return nil, nil, lastErr
+}
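+
+// Illustrative usage (a minimal sketch; jsonSerializer, yamlSerializer, protoSerializer and data
+// are hypothetical): this is how CodecFactory assembles its universal decoder: recognizing
+// decoders are composed so that the data itself selects the serializer:
+//
+//	universal := NewDecoder(jsonSerializer, yamlSerializer, protoSerializer)
+//	obj, gvk, err := universal.Decode(data, nil, nil)
+//	_, _, _ = obj, gvk, err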
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
new file mode 100644
index 0000000..a60a7c0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package streaming implements encoder and decoder for streams
+// of runtime.Objects over io.Writer/Readers.
+package streaming
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Encoder is a runtime.Encoder on a stream.
+type Encoder interface {
+	// Encode will write the provided object to the stream or return an error. It obeys the same
+	// contract as runtime.VersionedEncoder.
+	Encode(obj runtime.Object) error
+}
+
+// Decoder is a runtime.Decoder from a stream.
+type Decoder interface {
+	// Decode will return io.EOF when no more objects are available.
+	Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
+	// Close closes the underlying stream.
+	Close() error
+}
+
+// Serializer is a factory for creating encoders and decoders that work over streams.
+type Serializer interface {
+	NewEncoder(w io.Writer) Encoder
+	NewDecoder(r io.ReadCloser) Decoder
+}
+
+type decoder struct {
+	reader    io.ReadCloser
+	decoder   runtime.Decoder
+	buf       []byte
+	maxBytes  int
+	resetRead bool
+}
+
+// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
+// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to read
+// an entire object.
+func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
+	return &decoder{
+		reader:   r,
+		decoder:  d,
+		buf:      make([]byte, 1024),
+		maxBytes: 16 * 1024 * 1024,
+	}
+}
+
+var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
+
+// Decode reads the next object from the stream and decodes it.
+func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	base := 0
+	for {
+		n, err := d.reader.Read(d.buf[base:])
+		if err == io.ErrShortBuffer {
+			if n == 0 {
+				return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
+			}
+			if d.resetRead {
+				continue
+			}
+			// double the buffer size up to maxBytes
+			if len(d.buf) < d.maxBytes {
+				base += n
+				d.buf = append(d.buf, make([]byte, len(d.buf))...)
+				continue
+			}
+			// must read the rest of the frame (until we stop getting ErrShortBuffer)
+			d.resetRead = true
+			base = 0
+			return nil, nil, ErrObjectTooLarge
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if d.resetRead {
+			// now that we have drained the large read, continue
+			d.resetRead = false
+			continue
+		}
+		base += n
+		break
+	}
+	return d.decoder.Decode(d.buf[:base], defaults, into)
+}
+
+func (d *decoder) Close() error {
+	return d.reader.Close()
+}
+
+type encoder struct {
+	writer  io.Writer
+	encoder runtime.Encoder
+	buf     *bytes.Buffer
+}
+
+// NewEncoder returns a new streaming encoder.
+func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
+	return &encoder{
+		writer:  w,
+		encoder: e,
+		buf:     &bytes.Buffer{},
+	}
+}
+
+// Encode writes the provided object to the nested writer.
+func (e *encoder) Encode(obj runtime.Object) error {
+	if err := e.encoder.Encode(obj, e.buf); err != nil {
+		return err
+	}
+	_, err := e.writer.Write(e.buf.Bytes())
+	e.buf.Reset()
+	return err
+}
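+
+// Illustrative usage (a minimal sketch; frameWriter, frameReader, jsonSerializer and obj are
+// hypothetical): streaming encoders and decoders are normally paired with a framer from the
+// chosen wire format, as in watch streams:
+//
+//	enc := NewEncoder(frameWriter, jsonSerializer)
+//	dec := NewDecoder(frameReader, jsonSerializer)
+//	_ = enc.Encode(obj)
+//	obj2, gvk, err := dec.Decode(nil, nil)
+//	_, _, _ = obj2, gvk, err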
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
new file mode 100644
index 0000000..718c5df
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
@@ -0,0 +1,250 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioning
+
+import (
+	"encoding/json"
+	"io"
+	"reflect"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+)
+
+// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
+func NewDefaultingCodecForScheme(
+	// TODO: I should be a scheme interface?
+	scheme *runtime.Scheme,
+	encoder runtime.Encoder,
+	decoder runtime.Decoder,
+	encodeVersion runtime.GroupVersioner,
+	decodeVersion runtime.GroupVersioner,
+) runtime.Codec {
+	return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name())
+}
+
+// NewCodec takes objects in their internal versions and converts them to external versions before
+// serializing them. It assumes the serializer provided to it only deals with external versions.
+// This class is also a serializer, but is generally used with a specific version.
+func NewCodec(
+	encoder runtime.Encoder,
+	decoder runtime.Decoder,
+	convertor runtime.ObjectConvertor,
+	creater runtime.ObjectCreater,
+	typer runtime.ObjectTyper,
+	defaulter runtime.ObjectDefaulter,
+	encodeVersion runtime.GroupVersioner,
+	decodeVersion runtime.GroupVersioner,
+	originalSchemeName string,
+) runtime.Codec {
+	internal := &codec{
+		encoder:   encoder,
+		decoder:   decoder,
+		convertor: convertor,
+		creater:   creater,
+		typer:     typer,
+		defaulter: defaulter,
+
+		encodeVersion: encodeVersion,
+		decodeVersion: decodeVersion,
+
+		identifier: identifier(encodeVersion, encoder),
+
+		originalSchemeName: originalSchemeName,
+	}
+	return internal
+}
+
+type codec struct {
+	encoder   runtime.Encoder
+	decoder   runtime.Decoder
+	convertor runtime.ObjectConvertor
+	creater   runtime.ObjectCreater
+	typer     runtime.ObjectTyper
+	defaulter runtime.ObjectDefaulter
+
+	encodeVersion runtime.GroupVersioner
+	decodeVersion runtime.GroupVersioner
+
+	identifier runtime.Identifier
+
+	// originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
+	originalSchemeName string
+}
+
+var identifiersMap sync.Map
+
+type codecIdentifier struct {
+	EncodeGV string `json:"encodeGV,omitempty"`
+	Encoder  string `json:"encoder,omitempty"`
+	Name     string `json:"name,omitempty"`
+}
+
+// identifier computes Identifier of Encoder based on codec parameters.
+func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier {
+	result := codecIdentifier{
+		Name: "versioning",
+	}
+
+	if encodeGV != nil {
+		result.EncodeGV = encodeGV.Identifier()
+	}
+	if encoder != nil {
+		result.Encoder = string(encoder.Identifier())
+	}
+	if id, ok := identifiersMap.Load(result); ok {
+		return id.(runtime.Identifier)
+	}
+	identifier, err := json.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling identifier for codec: %v", err)
+	}
+	identifiersMap.Store(result, runtime.Identifier(identifier))
+	return runtime.Identifier(identifier)
+}
+
+// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
+// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
+// into that matches the serialized version.
+func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	// If the into object is unstructured and expresses an opinion about its group/version,
+	// create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
+	decodeInto := into
+	if into != nil {
+		if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() {
+			decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object)
+		}
+	}
+
+	obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
+	if err != nil {
+		return nil, gvk, err
+	}
+
+	if d, ok := obj.(runtime.NestedObjectDecoder); ok {
+		if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
+			return nil, gvk, err
+		}
+	}
+
+	// if we specify a target, use generic conversion.
+	if into != nil {
+		// perform defaulting if requested
+		if c.defaulter != nil {
+			c.defaulter.Default(obj)
+		}
+
+		// Short-circuit conversion if the into object is same object
+		if into == obj {
+			return into, gvk, nil
+		}
+
+		if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
+			return nil, gvk, err
+		}
+
+		return into, gvk, nil
+	}
+
+	// perform defaulting if requested
+	if c.defaulter != nil {
+		c.defaulter.Default(obj)
+	}
+
+	out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion)
+	if err != nil {
+		return nil, gvk, err
+	}
+	return out, gvk, nil
+}
+
+// Encode ensures the provided object is output in the appropriate group and version, invoking
+// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
+func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(c.Identifier(), c.doEncode, w)
+	}
+	return c.doEncode(obj, w)
+}
+
+func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
+	switch obj := obj.(type) {
+	case *runtime.Unknown:
+		return c.encoder.Encode(obj, w)
+	case runtime.Unstructured:
+		// An unstructured list can contain objects of multiple group version kinds. Don't short-circuit just
+		// because the top-level type matches our desired destination type. Actually send the object to the converter
+		// to give it a chance to convert the list items if needed.
+		if _, ok := obj.(*unstructured.UnstructuredList); !ok {
+			// Avoid a conversion round trip if the GVK is already the right one or is empty (yes, this is a hack, but it's the old behaviour we rely on in kubectl).
+			objGVK := obj.GetObjectKind().GroupVersionKind()
+			if len(objGVK.Version) == 0 {
+				return c.encoder.Encode(obj, w)
+			}
+			targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
+			if !ok {
+				return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
+			}
+			if targetGVK == objGVK {
+				return c.encoder.Encode(obj, w)
+			}
+		}
+	}
+
+	gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
+	if err != nil {
+		return err
+	}
+
+	objectKind := obj.GetObjectKind()
+	old := objectKind.GroupVersionKind()
+	// restore the old GVK after encoding
+	defer objectKind.SetGroupVersionKind(old)
+
+	if c.encodeVersion == nil || isUnversioned {
+		if e, ok := obj.(runtime.NestedObjectEncoder); ok {
+			if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+				return err
+			}
+		}
+		objectKind.SetGroupVersionKind(gvks[0])
+		return c.encoder.Encode(obj, w)
+	}
+
+	// Perform a conversion if necessary
+	out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
+	if err != nil {
+		return err
+	}
+
+	if e, ok := out.(runtime.NestedObjectEncoder); ok {
+		if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+			return err
+		}
+	}
+
+	// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
+	return c.encoder.Encode(out, w)
+}
+
+// Identifier implements runtime.Encoder interface.
+func (c *codec) Identifier() runtime.Identifier {
+	return c.identifier
+}
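
Illustrative usage (not part of this change): the sketch below wires this codec up by hand for a single hypothetical Widget type registered in one group/version; in practice a serializer.CodecFactory usually performs this wiring for a scheme.

package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	runtimejson "k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// Widget is a hypothetical API type used only for this example.
type Widget struct {
	runtime.TypeMeta `json:",inline"`
	Name             string `json:"name"`
}

func (w *Widget) DeepCopyObject() runtime.Object { c := *w; return &c }

func main() {
	gv := schema.GroupVersion{Group: "example.dev", Version: "v1"}
	scheme := runtime.NewScheme()
	scheme.AddKnownTypes(gv, &Widget{})

	ser := runtimejson.NewSerializerWithOptions(runtimejson.DefaultMetaFactory, scheme, scheme, runtimejson.SerializerOptions{})
	// Encode to gv and decode back to gv; the scheme supplies conversion, creation, typing and defaulting.
	codec := versioning.NewDefaultingCodecForScheme(scheme, ser, ser, gv, gv)

	var buf bytes.Buffer
	if err := codec.Encode(&Widget{Name: "w1"}, &buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"apiVersion":"example.dev/v1","kind":"Widget","name":"w1"}
}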
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go
new file mode 100644
index 0000000..5bc642b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/doc"
+	"go/parser"
+	"go/token"
+	"io"
+	"reflect"
+	"strings"
+)
+
+// Pair of strings. We keep the name of fields and the doc.
+type Pair struct {
+	Name, Doc string
+}
+
+// KubeTypes is an array to represent all available types in a parsed file. [0] is for the type itself
+type KubeTypes []Pair
+
+func astFrom(filePath string) *doc.Package {
+	fset := token.NewFileSet()
+	m := make(map[string]*ast.File)
+
+	f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	m[filePath] = f
+	apkg, _ := ast.NewPackage(fset, m, nil, nil)
+
+	return doc.New(apkg, "", 0)
+}
+
+func fmtRawDoc(rawDoc string) string {
+	var buffer bytes.Buffer
+	delPrevChar := func() {
+		if buffer.Len() > 0 {
+			buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n"
+		}
+	}
+
+	// Ignore all lines after ---
+	rawDoc = strings.Split(rawDoc, "---")[0]
+
+	for _, line := range strings.Split(rawDoc, "\n") {
+		line = strings.TrimRight(line, " ")
+		leading := strings.TrimLeft(line, " ")
+		switch {
+		case len(line) == 0: // Keep paragraphs
+			delPrevChar()
+			buffer.WriteString("\n\n")
+		case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs
+		case strings.HasPrefix(leading, "+"): // Ignore instructions to the generators
+		default:
+			if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") {
+				delPrevChar()
+				line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-something..."
+			} else {
+				line += " "
+			}
+			buffer.WriteString(line)
+		}
+	}
+
+	postDoc := strings.TrimRight(buffer.String(), "\n")
+	postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
+	postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
+	postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
+	postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
+
+	return postDoc
+}
+
+// fieldName returns the name of the field as it should appear in JSON format
+// "-" indicates that this field is not part of the JSON representation
+func fieldName(field *ast.Field) string {
+	jsonTag := ""
+	if field.Tag != nil {
+		jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation
+		if strings.Contains(jsonTag, "inline") {
+			return "-"
+		}
+	}
+
+	jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-"
+	if jsonTag == "" {
+		if field.Names != nil {
+			return field.Names[0].Name
+		}
+		return field.Type.(*ast.Ident).Name
+	}
+	return jsonTag
+}
+
+// A buffer of lines that will be written.
+type bufferedLine struct {
+	line        string
+	indentation int
+}
+
+type buffer struct {
+	lines []bufferedLine
+}
+
+func newBuffer() *buffer {
+	return &buffer{
+		lines: make([]bufferedLine, 0),
+	}
+}
+
+func (b *buffer) addLine(line string, indent int) {
+	b.lines = append(b.lines, bufferedLine{line, indent})
+}
+
+func (b *buffer) flushLines(w io.Writer) error {
+	for _, line := range b.lines {
+		indentation := strings.Repeat("\t", line.indentation)
+		fullLine := fmt.Sprintf("%s%s", indentation, line.line)
+		if _, err := io.WriteString(w, fullLine); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeFuncHeader(b *buffer, structName string, indent int) {
+	s := fmt.Sprintf("var map_%s = map[string]string {\n", structName)
+	b.addLine(s, indent)
+}
+
+func writeFuncFooter(b *buffer, structName string, indent int) {
+	b.addLine("}\n", indent) // Closes the map definition
+
+	s := fmt.Sprintf("func (%s) SwaggerDoc() map[string]string {\n", structName)
+	b.addLine(s, indent)
+	s = fmt.Sprintf("return map_%s\n", structName)
+	b.addLine(s, indent+1)
+	b.addLine("}\n", indent) // Closes the function definition
+}
+
+func writeMapBody(b *buffer, kubeType []Pair, indent int) {
+	format := "\"%s\": \"%s\",\n"
+	for _, pair := range kubeType {
+		s := fmt.Sprintf(format, pair.Name, pair.Doc)
+		b.addLine(s, indent+2)
+	}
+}
+
+// ParseDocumentationFrom gets all types' documentation and returns them as an
+// array. Each type is again represented as an array (we have to use arrays as we
+// need to be sure for the order of the fields). This function returns fields and
+// struct definitions that have no documentation as {name, ""}.
+func ParseDocumentationFrom(src string) []KubeTypes {
+	var docForTypes []KubeTypes
+
+	pkg := astFrom(src)
+
+	for _, kubType := range pkg.Types {
+		if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok {
+			var ks KubeTypes
+			ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc)})
+
+			for _, field := range structType.Fields.List {
+				if n := fieldName(field); n != "-" {
+					fieldDoc := fmtRawDoc(field.Doc.Text())
+					ks = append(ks, Pair{n, fieldDoc})
+				}
+			}
+			docForTypes = append(docForTypes, ks)
+		}
+	}
+
+	return docForTypes
+}
+
+// WriteSwaggerDocFunc writes a declaration of a function as a string. This function is used in
+// Swagger as a documentation source for structs and their fields.
+func WriteSwaggerDocFunc(kubeTypes []KubeTypes, w io.Writer) error {
+	for _, kubeType := range kubeTypes {
+		structName := kubeType[0].Name
+		kubeType[0].Name = ""
+
+		// Ignore empty documentation
+		docfulTypes := make(KubeTypes, 0, len(kubeType))
+		for _, pair := range kubeType {
+			if pair.Doc != "" {
+				docfulTypes = append(docfulTypes, pair)
+			}
+		}
+
+		if len(docfulTypes) == 0 {
+			continue // If neither the struct nor any of its fields have documentation, skip the function definition
+		}
+
+		indent := 0
+		buffer := newBuffer()
+
+		writeFuncHeader(buffer, structName, indent)
+		writeMapBody(buffer, docfulTypes, indent)
+		writeFuncFooter(buffer, structName, indent)
+		buffer.addLine("\n", 0)
+
+		if err := buffer.flushLines(w); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// VerifySwaggerDocsExist writes to an io.Writer a list of structs and fields that
+// are missing documentation.
+func VerifySwaggerDocsExist(kubeTypes []KubeTypes, w io.Writer) (int, error) {
+	missingDocs := 0
+	buffer := newBuffer()
+
+	for _, kubeType := range kubeTypes {
+		structName := kubeType[0].Name
+		if kubeType[0].Doc == "" {
+			format := "Missing documentation for the struct itself: %s\n"
+			s := fmt.Sprintf(format, structName)
+			buffer.addLine(s, 0)
+			missingDocs++
+		}
+		kubeType = kubeType[1:] // Skip struct definition
+
+		for _, pair := range kubeType { // Iterate only the fields
+			if pair.Doc == "" {
+				format := "In struct: %s, field documentation is missing: %s\n"
+				s := fmt.Sprintf(format, structName, pair.Name)
+				buffer.addLine(s, 0)
+				missingDocs++
+			}
+		}
+	}
+
+	if err := buffer.flushLines(w); err != nil {
+		return -1, err
+	}
+	return missingDocs, nil
+}
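
Illustrative usage (not part of this change): the helpers above can be driven by hand against a throwaway source file; the file contents and names below are made up for the example.

package main

import (
	"io/ioutil"
	"os"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	src := "package demo\n\n" +
		"// Gadget is a demo type.\n" +
		"type Gadget struct {\n" +
		"\t// Replicas is the desired number of copies.\n" +
		"\tReplicas int\n" +
		"}\n"

	tmp, _ := ioutil.TempFile("", "types-*.go")
	defer os.Remove(tmp.Name())
	_, _ = tmp.WriteString(src)
	_ = tmp.Close()

	// [0] of each KubeTypes entry is the struct itself, the rest are its fields.
	kubeTypes := runtime.ParseDocumentationFrom(tmp.Name())
	_ = runtime.WriteSwaggerDocFunc(kubeTypes, os.Stdout)
}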
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
new file mode 100644
index 0000000..31359f3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+// Note that the types provided in this file are not versioned and are intended to be
+// safe to use from within all versions of every API object.
+
+// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type,
+// like this:
+// type MyAwesomeAPIObject struct {
+//      runtime.TypeMeta    `json:",inline"`
+//      ... // other fields
+// }
+// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind
+//
+// TypeMeta is provided here for convenience. You may use it directly from this package or define
+// your own with the same fields.
+//
+// +k8s:deepcopy-gen=false
+// +protobuf=true
+// +k8s:openapi-gen=true
+type TypeMeta struct {
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+	// +optional
+	Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"`
+}
+
+const (
+	ContentTypeJSON     string = "application/json"
+	ContentTypeYAML     string = "application/yaml"
+	ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
+)
+
+// RawExtension is used to hold extensions in external versions.
+//
+// To use this, make a field which has RawExtension as its type in your external, versioned
+// struct, and Object in your internal struct. You also need to register your
+// various plugin types.
+//
+// // Internal package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+//	MyPlugin runtime.Object `json:"myPlugin"`
+// }
+// type PluginA struct {
+//	AOption string `json:"aOption"`
+// }
+//
+// // External package:
+// type MyAPIObject struct {
+// 	runtime.TypeMeta `json:",inline"`
+//	MyPlugin runtime.RawExtension `json:"myPlugin"`
+// }
+// type PluginA struct {
+//	AOption string `json:"aOption"`
+// }
+//
+// // On the wire, the JSON will look something like this:
+// {
+//	"kind":"MyAPIObject",
+//	"apiVersion":"v1",
+//	"myPlugin": {
+//		"kind":"PluginA",
+//		"aOption":"foo",
+//	},
+// }
+//
+// So what happens? Decode first uses json or yaml to unmarshal the serialized data into
+// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.
+// The next step is to copy (using pkg/conversion) into the internal struct. The runtime
+// package's DefaultScheme has conversion functions installed which will unpack the
+// JSON stored in RawExtension, turning it into the correct object type, and storing it
+// in the Object. (TODO: In the case where the object is of an unknown type, a
+// runtime.Unknown object will be created and stored.)
+//
+// +k8s:deepcopy-gen=true
+// +protobuf=true
+// +k8s:openapi-gen=true
+type RawExtension struct {
+	// Raw is the underlying serialization of this object.
+	//
+	// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
+	Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"`
+	// Object can hold a representation of this extension - useful for working with versioned
+	// structs.
+	Object Object `json:"-"`
+}
+
+// Unknown allows api objects with unknown types to be passed-through. This can be used
+// to deal with the API objects from a plug-in. Unknown objects still have functioning
+// TypeMeta features-- kind, version, etc.
+// TODO: Make this object have easy access to field-based accessors and setters for
+// metadata and field mutation.
+//
+// +k8s:deepcopy-gen=true
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=true
+// +k8s:openapi-gen=true
+type Unknown struct {
+	TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"`
+	// Raw will hold the complete serialized object which couldn't be matched
+	// with a registered type. Most likely, nothing should be done with this
+	// except for passing it through the system.
+	Raw []byte `protobuf:"bytes,2,opt,name=raw"`
+	// ContentEncoding is encoding used to encode 'Raw' data.
+	// Unspecified means no encoding.
+	ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"`
+	// ContentType  is serialization method used to serialize 'Raw'.
+	// Unspecified means ContentTypeJSON.
+	ContentType string `protobuf:"bytes,4,opt,name=contentType"`
+}
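
Illustrative usage (not part of this change): the sketch below mirrors the MyAPIObject/PluginA example from the doc comment and relies on RawExtension's JSON unmarshalling helper (defined elsewhere in this package) to keep the nested payload verbatim in Raw.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// MyAPIObject is the external, versioned struct from the doc comment above.
type MyAPIObject struct {
	runtime.TypeMeta `json:",inline"`
	MyPlugin         runtime.RawExtension `json:"myPlugin"`
}

func main() {
	wire := []byte(`{"kind":"MyAPIObject","apiVersion":"v1","myPlugin":{"kind":"PluginA","aOption":"foo"}}`)

	var obj MyAPIObject
	if err := json.Unmarshal(wire, &obj); err != nil {
		panic(err)
	}
	// The nested plugin payload stays in Raw until something decides how to decode it.
	fmt.Println(string(obj.MyPlugin.Raw)) // {"kind":"PluginA","aOption":"foo"}
}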
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
new file mode 100644
index 0000000..a82227b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+)
+
+type ProtobufMarshaller interface {
+	MarshalTo(data []byte) (int, error)
+}
+
+type ProtobufReverseMarshaller interface {
+	MarshalToSizedBuffer(data []byte) (int, error)
+}
+
+// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
+// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller.
+func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
+	// Calculate the full size of the message.
+	msgSize := m.Size()
+	if b != nil {
+		msgSize += int(size) + sovGenerated(size) + 1
+	}
+
+	// Reverse marshal the fields of m.
+	i := msgSize
+	i -= len(m.ContentType)
+	copy(data[i:], m.ContentType)
+	i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
+	i--
+	data[i] = 0x22
+	i -= len(m.ContentEncoding)
+	copy(data[i:], m.ContentEncoding)
+	i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
+	i--
+	data[i] = 0x1a
+	if b != nil {
+		if r, ok := b.(ProtobufReverseMarshaller); ok {
+			n1, err := r.MarshalToSizedBuffer(data[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= int(size)
+			if uint64(n1) != size {
+				// programmer error: the Size() method for protobuf does not match the results of MarshalToSizedBuffer, which means the proto
+				// struct returned would be wrong.
+				return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1)
+			}
+		} else {
+			i -= int(size)
+			n1, err := b.MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			if uint64(n1) != size {
+				// programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto
+				// struct returned would be wrong.
+				return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1)
+			}
+		}
+		i = encodeVarintGenerated(data, i, size)
+		i--
+		data[i] = 0x12
+	}
+	n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i])
+	if err != nil {
+		return 0, err
+	}
+	i -= n2
+	i = encodeVarintGenerated(data, i, uint64(n2))
+	i--
+	data[i] = 0xa
+	return msgSize - i, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
new file mode 100644
index 0000000..b039383
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
@@ -0,0 +1,75 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package runtime
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RawExtension) DeepCopyInto(out *RawExtension) {
+	*out = *in
+	if in.Raw != nil {
+		in, out := &in.Raw, &out.Raw
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Object != nil {
+		out.Object = in.Object.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawExtension.
+func (in *RawExtension) DeepCopy() *RawExtension {
+	if in == nil {
+		return nil
+	}
+	out := new(RawExtension)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Unknown) DeepCopyInto(out *Unknown) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Raw != nil {
+		in, out := &in.Raw, &out.Raw
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unknown.
+func (in *Unknown) DeepCopy() *Unknown {
+	if in == nil {
+		return nil
+	}
+	out := new(Unknown)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object.
+func (in *Unknown) DeepCopyObject() Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go
new file mode 100644
index 0000000..298f798
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/selection/operator.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package selection
+
+// Operator represents a key/field's relationship to value(s).
+// See labels.Requirement and fields.Requirement for more details.
+type Operator string
+
+const (
+	DoesNotExist Operator = "!"
+	Equals       Operator = "="
+	DoubleEquals Operator = "=="
+	In           Operator = "in"
+	NotEquals    Operator = "!="
+	NotIn        Operator = "notin"
+	Exists       Operator = "exists"
+	GreaterThan  Operator = "gt"
+	LessThan     Operator = "lt"
+)
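
Illustrative usage (not part of this change): these operators are mostly consumed when building label selector requirements; the sketch below assumes k8s.io/apimachinery/pkg/labels is also vendored.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	req, err := labels.NewRequirement("env", selection.In, []string{"prod", "staging"})
	if err != nil {
		panic(err)
	}
	sel := labels.NewSelector().Add(*req)

	fmt.Println(sel.String())                           // env in (prod,staging)
	fmt.Println(sel.Matches(labels.Set{"env": "prod"})) // true
	fmt.Println(sel.Matches(labels.Set{"env": "dev"}))  // false
}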
diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go
new file mode 100644
index 0000000..5667fa9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package types implements various generic types used throughout kubernetes.
+package types // import "k8s.io/apimachinery/pkg/types"
diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
new file mode 100644
index 0000000..88f0de3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+)
+
+// NamespacedName comprises a resource name, with a mandatory namespace,
+// rendered as "<namespace>/<name>".  Being a type captures intent and
+// helps make sure that UIDs, namespaced names and non-namespaced names
+// do not get conflated in code.  For most use cases, namespace and name
+// will already have been format validated at the API entry point, so we
+// don't do that here.  Where that's not the case (e.g. in testing),
+// consider using NamespacedNameOrDie() in testing.go in this package.
+
+type NamespacedName struct {
+	Namespace string
+	Name      string
+}
+
+const (
+	Separator = '/'
+)
+
+// String returns the general purpose string representation
+func (n NamespacedName) String() string {
+	return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name)
+}
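
Illustrative usage (not part of this change), showing the rendered form; the values are arbitrary.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	nn := types.NamespacedName{Namespace: "voltha", Name: "bbsim0"}
	fmt.Println(nn.String()) // voltha/bbsim0
}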
diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go
new file mode 100644
index 0000000..fee348d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// NodeName is a type that holds an api.Node's Name identifier.
+// Being a type captures intent and helps make sure that the node name
+// is not confused with similar concepts (the hostname, the cloud provider id,
+// the cloud provider name etc)
+//
+// To clarify the various types:
+//
+// * Node.Name is the Name field of the Node in the API.  This should be stored in a NodeName.
+//   Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level.
+//
+// * Hostname is the hostname of the local machine (from uname -n).
+//   However, some components allow the user to pass in a --hostname-override flag,
+//   which will override this in most places. In the absence of anything more meaningful,
+//   kubelet will use Hostname as the Node.Name when it creates the Node.
+//
+// * The cloud providers have their own names: GCE has InstanceName, AWS has InstanceId.
+//
+//   For GCE, InstanceName is the Name of an Instance object in the GCE API.  On GCE, Instance.Name becomes the
+//   Hostname, and thus it makes sense also to use it as the Node.Name.  But that is GCE specific, and it is up
+//   to the cloudprovider how to do this mapping.
+//
+//   For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the
+//   PrivateDnsName for the Node.Name.  And this is _not_ always the same as the hostname: if
+//   we are using a custom DHCP domain it won't be.
+type NodeName string
diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go
new file mode 100644
index 0000000..fe8ecaa
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// Similar to the types above, these are constants to support HTTP PATCH, utilized
+// by both the client and server, that didn't warrant a whole dedicated package.
+type PatchType string
+
+const (
+	JSONPatchType           PatchType = "application/json-patch+json"
+	MergePatchType          PatchType = "application/merge-patch+json"
+	StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
+	ApplyPatchType          PatchType = "application/apply-patch+yaml"
+)
diff --git a/vendor/k8s.io/apimachinery/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go
new file mode 100644
index 0000000..8693392
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/types/uid.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// UID is a type that holds unique ID values, including UUIDs.  Because we
+// don't ONLY use UUIDs, this is an alias to string.  Being a type captures
+// intent and helps make sure that UIDs and names do not get conflated.
+type UID string
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
new file mode 100644
index 0000000..6cf13d8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
@@ -0,0 +1,393 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clock
+
+import (
+	"sync"
+	"time"
+)
+
+// PassiveClock allows for injecting fake or real clocks into code
+// that needs to read the current time but does not support scheduling
+// activity in the future.
+type PassiveClock interface {
+	Now() time.Time
+	Since(time.Time) time.Duration
+}
+
+// Clock allows for injecting fake or real clocks into code that
+// needs to do arbitrary things based on time.
+type Clock interface {
+	PassiveClock
+	After(time.Duration) <-chan time.Time
+	NewTimer(time.Duration) Timer
+	Sleep(time.Duration)
+	NewTicker(time.Duration) Ticker
+}
+
+// RealClock really calls time.Now()
+type RealClock struct{}
+
+// Now returns the current time.
+func (RealClock) Now() time.Time {
+	return time.Now()
+}
+
+// Since returns time since the specified timestamp.
+func (RealClock) Since(ts time.Time) time.Duration {
+	return time.Since(ts)
+}
+
+// After is the same as time.After(d).
+func (RealClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+// NewTimer returns a new Timer.
+func (RealClock) NewTimer(d time.Duration) Timer {
+	return &realTimer{
+		timer: time.NewTimer(d),
+	}
+}
+
+// NewTicker returns a new Ticker.
+func (RealClock) NewTicker(d time.Duration) Ticker {
+	return &realTicker{
+		ticker: time.NewTicker(d),
+	}
+}
+
+// Sleep pauses the RealClock for duration d.
+func (RealClock) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+// FakePassiveClock implements PassiveClock, but returns an arbitrary time.
+type FakePassiveClock struct {
+	lock sync.RWMutex
+	time time.Time
+}
+
+// FakeClock implements Clock, but returns an arbitrary time.
+type FakeClock struct {
+	FakePassiveClock
+
+	// waiters are waiting for the fake time to pass their specified time
+	waiters []fakeClockWaiter
+}
+
+type fakeClockWaiter struct {
+	targetTime    time.Time
+	stepInterval  time.Duration
+	skipIfBlocked bool
+	destChan      chan time.Time
+}
+
+// NewFakePassiveClock returns a new FakePassiveClock.
+func NewFakePassiveClock(t time.Time) *FakePassiveClock {
+	return &FakePassiveClock{
+		time: t,
+	}
+}
+
+// NewFakeClock returns a new FakeClock
+func NewFakeClock(t time.Time) *FakeClock {
+	return &FakeClock{
+		FakePassiveClock: *NewFakePassiveClock(t),
+	}
+}
+
+// Now returns f's time.
+func (f *FakePassiveClock) Now() time.Time {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return f.time
+}
+
+// Since returns time since the time in f.
+func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return f.time.Sub(ts)
+}
+
+// SetTime sets the time on the FakePassiveClock.
+func (f *FakePassiveClock) SetTime(t time.Time) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.time = t
+}
+
+// After is the Fake version of time.After(d).
+func (f *FakeClock) After(d time.Duration) <-chan time.Time {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	stopTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // Don't block!
+	f.waiters = append(f.waiters, fakeClockWaiter{
+		targetTime: stopTime,
+		destChan:   ch,
+	})
+	return ch
+}
+
+// NewTimer is the Fake version of time.NewTimer(d).
+func (f *FakeClock) NewTimer(d time.Duration) Timer {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	stopTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // Don't block!
+	timer := &fakeTimer{
+		fakeClock: f,
+		waiter: fakeClockWaiter{
+			targetTime: stopTime,
+			destChan:   ch,
+		},
+	}
+	f.waiters = append(f.waiters, timer.waiter)
+	return timer
+}
+
+// NewTicker returns a new Ticker.
+func (f *FakeClock) NewTicker(d time.Duration) Ticker {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	tickTime := f.time.Add(d)
+	ch := make(chan time.Time, 1) // hold one tick
+	f.waiters = append(f.waiters, fakeClockWaiter{
+		targetTime:    tickTime,
+		stepInterval:  d,
+		skipIfBlocked: true,
+		destChan:      ch,
+	})
+
+	return &fakeTicker{
+		c: ch,
+	}
+}
+
+// Step moves the clock by Duration and notifies anyone that has called After, Tick, or NewTimer.
+func (f *FakeClock) Step(d time.Duration) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.setTimeLocked(f.time.Add(d))
+}
+
+// SetTime sets the time on a FakeClock.
+func (f *FakeClock) SetTime(t time.Time) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.setTimeLocked(t)
+}
+
+// Actually changes the time and checks any waiters. f must be write-locked.
+func (f *FakeClock) setTimeLocked(t time.Time) {
+	f.time = t
+	newWaiters := make([]fakeClockWaiter, 0, len(f.waiters))
+	for i := range f.waiters {
+		w := &f.waiters[i]
+		if !w.targetTime.After(t) {
+
+			if w.skipIfBlocked {
+				select {
+				case w.destChan <- t:
+				default:
+				}
+			} else {
+				w.destChan <- t
+			}
+
+			if w.stepInterval > 0 {
+				for !w.targetTime.After(t) {
+					w.targetTime = w.targetTime.Add(w.stepInterval)
+				}
+				newWaiters = append(newWaiters, *w)
+			}
+
+		} else {
+			newWaiters = append(newWaiters, f.waiters[i])
+		}
+	}
+	f.waiters = newWaiters
+}
+
+// HasWaiters returns true if After has been called on f but not yet satisfied (so you can
+// write race-free tests).
+func (f *FakeClock) HasWaiters() bool {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	return len(f.waiters) > 0
+}
+
+// Sleep pauses the FakeClock for duration d.
+func (f *FakeClock) Sleep(d time.Duration) {
+	f.Step(d)
+}
+
+// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration
+type IntervalClock struct {
+	Time     time.Time
+	Duration time.Duration
+}
+
+// Now returns i's time.
+func (i *IntervalClock) Now() time.Time {
+	i.Time = i.Time.Add(i.Duration)
+	return i.Time
+}
+
+// Since returns time since the time in i.
+func (i *IntervalClock) Since(ts time.Time) time.Duration {
+	return i.Time.Sub(ts)
+}
+
+// After is currently unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) After(d time.Duration) <-chan time.Time {
+	panic("IntervalClock doesn't implement After")
+}
+
+// NewTimer is currently unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) NewTimer(d time.Duration) Timer {
+	panic("IntervalClock doesn't implement NewTimer")
+}
+
+// NewTicker is currently unimplemented, will panic.
+// TODO: make interval clock use FakeClock so this can be implemented.
+func (*IntervalClock) NewTicker(d time.Duration) Ticker {
+	panic("IntervalClock doesn't implement NewTicker")
+}
+
+// Sleep is currently unimplemented; will panic.
+func (*IntervalClock) Sleep(d time.Duration) {
+	panic("IntervalClock doesn't implement Sleep")
+}
+
+// Timer allows for injecting fake or real timers into code that
+// needs to do arbitrary things based on time.
+type Timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+// realTimer is backed by an actual time.Timer.
+type realTimer struct {
+	timer *time.Timer
+}
+
+// C returns the underlying timer's channel.
+func (r *realTimer) C() <-chan time.Time {
+	return r.timer.C
+}
+
+// Stop calls Stop() on the underlying timer.
+func (r *realTimer) Stop() bool {
+	return r.timer.Stop()
+}
+
+// Reset calls Reset() on the underlying timer.
+func (r *realTimer) Reset(d time.Duration) bool {
+	return r.timer.Reset(d)
+}
+
+// fakeTimer implements Timer based on a FakeClock.
+type fakeTimer struct {
+	fakeClock *FakeClock
+	waiter    fakeClockWaiter
+}
+
+// C returns the channel that notifies when this timer has fired.
+func (f *fakeTimer) C() <-chan time.Time {
+	return f.waiter.destChan
+}
+
+// Stop conditionally stops the timer.  If the timer has neither fired
+// nor been stopped then this call stops the timer and returns true,
+// otherwise this call returns false.  This is like time.Timer::Stop.
+func (f *fakeTimer) Stop() bool {
+	f.fakeClock.lock.Lock()
+	defer f.fakeClock.lock.Unlock()
+	// The timer has already fired or been stopped, unless it is found
+	// among the clock's waiters.
+	stopped := false
+	oldWaiters := f.fakeClock.waiters
+	newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters))
+	seekChan := f.waiter.destChan
+	for i := range oldWaiters {
+		// Identify the timer's fakeClockWaiter by the identity of the
+		// destination channel, nothing else is necessarily unique and
+		// constant since the timer's creation.
+		if oldWaiters[i].destChan == seekChan {
+			stopped = true
+		} else {
+			newWaiters = append(newWaiters, oldWaiters[i])
+		}
+	}
+
+	f.fakeClock.waiters = newWaiters
+
+	return stopped
+}
+
+// Reset conditionally updates the firing time of the timer.  If the
+// timer has neither fired nor been stopped then this call resets the
+// timer to the fake clock's "now" + d and returns true, otherwise
+// this call returns false.  This is like time.Timer::Reset.
+func (f *fakeTimer) Reset(d time.Duration) bool {
+	f.fakeClock.lock.Lock()
+	defer f.fakeClock.lock.Unlock()
+	waiters := f.fakeClock.waiters
+	seekChan := f.waiter.destChan
+	for i := range waiters {
+		if waiters[i].destChan == seekChan {
+			waiters[i].targetTime = f.fakeClock.time.Add(d)
+			return true
+		}
+	}
+	return false
+}
+
+// Ticker defines the Ticker interface
+type Ticker interface {
+	C() <-chan time.Time
+	Stop()
+}
+
+type realTicker struct {
+	ticker *time.Ticker
+}
+
+func (t *realTicker) C() <-chan time.Time {
+	return t.ticker.C
+}
+
+func (t *realTicker) Stop() {
+	t.ticker.Stop()
+}
+
+type fakeTicker struct {
+	c <-chan time.Time
+}
+
+func (t *fakeTicker) C() <-chan time.Time {
+	return t.c
+}
+
+func (t *fakeTicker) Stop() {
+}
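
Illustrative usage (not part of this change): in tests, timers armed against a FakeClock fire only when Step or SetTime advances the fake time past their deadline.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	c := clock.NewFakeClock(time.Now())

	ch := c.After(5 * time.Second) // waiter registered against the fake time
	fmt.Println(c.HasWaiters())    // true

	c.Step(10 * time.Second)           // advance past the deadline; the waiter fires
	fmt.Println((<-ch).Equal(c.Now())) // true, and no real waiting happened
}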
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
new file mode 100644
index 0000000..5d4d625
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package errors implements various utility functions and types around errors.
+package errors // import "k8s.io/apimachinery/pkg/util/errors"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
new file mode 100644
index 0000000..5bafc21
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -0,0 +1,249 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package errors
+
+import (
+	"errors"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// MessageCountMap contains the number of occurrences of each error message.
+type MessageCountMap map[string]int
+
+// Aggregate represents an object that contains multiple errors, but does not
+// necessarily have singular semantic meaning.
+// The aggregate can be used with `errors.Is()` to check for the occurrence of
+// a specific error type.
+// Errors.As() is not supported, because the caller presumably cares about a
+// specific error of potentially multiple that match the given type.
+type Aggregate interface {
+	error
+	Errors() []error
+	Is(error) bool
+}
+
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface.  If the slice is empty,
+// this returns nil.
+// It will check whether any element of the input error list is nil, to avoid
+// a nil pointer panic when Error() is called.
+func NewAggregate(errlist []error) Aggregate {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// Drop any nil entries from the input error list.
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregate(errs)
+}
+
+// This helper implements the error and Errors interfaces.  Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregate []error
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	seenerrs := sets.NewString()
+	result := ""
+	agg.visit(func(err error) bool {
+		msg := err.Error()
+		if seenerrs.Has(msg) {
+			return false
+		}
+		seenerrs.Insert(msg)
+		if len(seenerrs) > 1 {
+			result += ", "
+		}
+		result += msg
+		return false
+	})
+	if len(seenerrs) == 1 {
+		return result
+	}
+	return "[" + result + "]"
+}
+
+func (agg aggregate) Is(target error) bool {
+	return agg.visit(func(err error) bool {
+		return errors.Is(err, target)
+	})
+}
+
+func (agg aggregate) visit(f func(err error) bool) bool {
+	for _, err := range agg {
+		switch err := err.(type) {
+		case aggregate:
+			if match := err.visit(f); match {
+				return match
+			}
+		case Aggregate:
+			for _, nestedErr := range err.Errors() {
+				if match := f(nestedErr); match {
+					return match
+				}
+			}
+		default:
+			if match := f(err); match {
+				return match
+			}
+		}
+	}
+
+	return false
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error {
+	return []error(agg)
+}
+
+// Matcher is used to match errors.  Returns true if the error matches.
+type Matcher func(error) bool
+
+// FilterOut removes all errors that match any of the matchers from the input
+// error.  If the input is a singular error, only that error is tested.  If the
+// input implements the Aggregate interface, the list of errors will be
+// processed recursively.
+//
+// This can be used, for example, to remove known-OK errors (such as io.EOF or
+// os.PathNotFound) from a list of errors.
+func FilterOut(err error, fns ...Matcher) error {
+	if err == nil {
+		return nil
+	}
+	if agg, ok := err.(Aggregate); ok {
+		return NewAggregate(filterErrors(agg.Errors(), fns...))
+	}
+	if !matchesError(err, fns...) {
+		return err
+	}
+	return nil
+}
+
+// matchesError returns true if any Matcher returns true
+func matchesError(err error, fns ...Matcher) bool {
+	for _, fn := range fns {
+		if fn(err) {
+			return true
+		}
+	}
+	return false
+}
+
+// filterErrors returns any errors (or nested errors, if the list contains
+// nested Errors) for which all fns return false. If no errors
+// remain, a nil list is returned. The resulting slice will have all
+// nested slices flattened as a side effect.
+func filterErrors(list []error, fns ...Matcher) []error {
+	result := []error{}
+	for _, err := range list {
+		r := FilterOut(err, fns...)
+		if r != nil {
+			result = append(result, r)
+		}
+	}
+	return result
+}
+
+// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary
+// nesting, and flattens them all into a single Aggregate, recursively.
+func Flatten(agg Aggregate) Aggregate {
+	result := []error{}
+	if agg == nil {
+		return nil
+	}
+	for _, err := range agg.Errors() {
+		if a, ok := err.(Aggregate); ok {
+			r := Flatten(a)
+			if r != nil {
+				result = append(result, r.Errors()...)
+			}
+		} else {
+			if err != nil {
+				result = append(result, err)
+			}
+		}
+	}
+	return NewAggregate(result)
+}
+
+// CreateAggregateFromMessageCountMap converts a MessageCountMap into an Aggregate.
+func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
+	if m == nil {
+		return nil
+	}
+	result := make([]error, 0, len(m))
+	for errStr, count := range m {
+		var countStr string
+		if count > 1 {
+			countStr = fmt.Sprintf(" (repeated %v times)", count)
+		}
+		result = append(result, fmt.Errorf("%v%v", errStr, countStr))
+	}
+	return NewAggregate(result)
+}
+
+// Reduce will return err or, if err is an Aggregate and only has one item,
+// the first item in the aggregate.
+func Reduce(err error) error {
+	if agg, ok := err.(Aggregate); ok && err != nil {
+		switch len(agg.Errors()) {
+		case 1:
+			return agg.Errors()[0]
+		case 0:
+			return nil
+		}
+	}
+	return err
+}
+
+// AggregateGoroutines runs the provided functions in parallel, stuffing all
+// non-nil errors into the returned Aggregate.
+// Returns nil if all the functions complete successfully.
+func AggregateGoroutines(funcs ...func() error) Aggregate {
+	errChan := make(chan error, len(funcs))
+	for _, f := range funcs {
+		go func(f func() error) { errChan <- f() }(f)
+	}
+	errs := make([]error, 0)
+	for i := 0; i < cap(errChan); i++ {
+		if err := <-errChan; err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return NewAggregate(errs)
+}
+
+// ErrPreconditionViolated is returned when the precondition is violated
+var ErrPreconditionViolated = errors.New("precondition is violated")
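
Illustrative usage (not part of this change) of the aggregate helpers:

package main

import (
	"errors"
	"fmt"
	"io"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{io.EOF, errors.New("boom"), nil}) // nil entries are dropped
	fmt.Println(agg)                    // [EOF, boom]
	fmt.Println(errors.Is(agg, io.EOF)) // true, via Aggregate.Is

	// Remove known-OK errors; only "boom" remains, so Reduce unwraps it.
	rest := utilerrors.FilterOut(agg, func(err error) bool { return err == io.EOF })
	fmt.Println(utilerrors.Reduce(rest)) // boom
}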
diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
new file mode 100644
index 0000000..066680f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package framer implements simple frame decoding techniques for an io.ReadCloser
+package framer
+
+import (
+	"encoding/binary"
+	"encoding/json"
+	"io"
+)
+
+type lengthDelimitedFrameWriter struct {
+	w io.Writer
+	h [4]byte
+}
+
+func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer {
+	return &lengthDelimitedFrameWriter{w: w}
+}
+
+// Write writes a single frame to the nested writer, prepending it with the length
+// in bytes of data (as a 4 byte, big-endian uint32).
+func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) {
+	binary.BigEndian.PutUint32(w.h[:], uint32(len(data)))
+	n, err := w.w.Write(w.h[:])
+	if err != nil {
+		return 0, err
+	}
+	if n != len(w.h) {
+		return 0, io.ErrShortWrite
+	}
+	return w.w.Write(data)
+}
+
+type lengthDelimitedFrameReader struct {
+	r         io.ReadCloser
+	remaining int
+}
+
+// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed
+// frames off of a stream.
+//
+// The protocol is:
+//
+//   stream: message ...
+//   message: prefix body
+//   prefix: 4 byte uint32 in BigEndian order, denotes length of body
+//   body: bytes (0..prefix)
+//
+// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortBuffer
+// will be returned along with the number of bytes read.
+func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser {
+	return &lengthDelimitedFrameReader{r: r}
+}
+
+// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer
+// is returned and subsequent calls will attempt to read the last frame. A frame is complete when
+// err is nil.
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) {
+	if r.remaining <= 0 {
+		header := [4]byte{}
+		n, err := io.ReadAtLeast(r.r, header[:4], 4)
+		if err != nil {
+			return 0, err
+		}
+		if n != 4 {
+			return 0, io.ErrUnexpectedEOF
+		}
+		frameLength := int(binary.BigEndian.Uint32(header[:]))
+		r.remaining = frameLength
+	}
+
+	expect := r.remaining
+	max := expect
+	if max > len(data) {
+		max = len(data)
+	}
+	n, err := io.ReadAtLeast(r.r, data[:max], int(max))
+	r.remaining -= n
+	if err == io.ErrShortBuffer || r.remaining > 0 {
+		return n, io.ErrShortBuffer
+	}
+	if err != nil {
+		return n, err
+	}
+	if n != expect {
+		return n, io.ErrUnexpectedEOF
+	}
+
+	return n, nil
+}
+
+func (r *lengthDelimitedFrameReader) Close() error {
+	return r.r.Close()
+}
+
+type jsonFrameReader struct {
+	r         io.ReadCloser
+	decoder   *json.Decoder
+	remaining []byte
+}
+
+// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off
+// of a wire.
+//
+// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate
+// the read.
+func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser {
+	return &jsonFrameReader{
+		r:       r,
+		decoder: json.NewDecoder(r),
+	}
+}
+
+// Read decodes the next JSON object in the stream, or returns an error. The returned
+// byte slice will be modified the next time ReadFrame is invoked and should not be altered.
+func (r *jsonFrameReader) Read(data []byte) (int, error) {
+	// Return whatever remaining data exists from an in progress frame
+	if n := len(r.remaining); n > 0 {
+		if n <= len(data) {
+			data = append(data[0:0], r.remaining...)
+			r.remaining = nil
+			return n, nil
+		}
+
+		n = len(data)
+		data = append(data[0:0], r.remaining[:n]...)
+		r.remaining = r.remaining[n:]
+		return n, io.ErrShortBuffer
+	}
+
+	// RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see
+	// data written to data, or be larger than data and a different array.
+	n := len(data)
+	m := json.RawMessage(data[:0])
+	if err := r.decoder.Decode(&m); err != nil {
+		return 0, err
+	}
+
+	// If capacity of data is less than length of the message, decoder will allocate a new slice
+	// and set m to it, which means we need to copy the partial result back into data and preserve
+	// the remaining result for subsequent reads.
+	if len(m) > n {
+		data = append(data[0:0], m[:n]...)
+		r.remaining = m[n:]
+		return n, io.ErrShortBuffer
+	}
+	return len(m), nil
+}
+
+func (r *jsonFrameReader) Close() error {
+	return r.r.Close()
+}
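+
+// exampleJSONFrameRead is an illustrative sketch (not part of the upstream
+// file): it reads a single JSON document from a JSON-framed stream. The
+// function name is hypothetical; only packages imported above are used.
+func exampleJSONFrameRead(src io.ReadCloser) (json.RawMessage, error) {
+	fr := NewJSONFramedReader(src)
+	defer fr.Close()
+	buf := make([]byte, 4096)
+	n, err := fr.Read(buf) // io.ErrShortBuffer signals that the object did not fit in buf
+	if err != nil {
+		return nil, err
+	}
+	return json.RawMessage(buf[:n]), nil
+}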
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
new file mode 100644
index 0000000..ec1cb70
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
@@ -0,0 +1,372 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+
+package intstr
+
+import (
+	fmt "fmt"
+
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *IntOrString) Reset()      { *m = IntOrString{} }
+func (*IntOrString) ProtoMessage() {}
+func (*IntOrString) Descriptor() ([]byte, []int) {
+	return fileDescriptor_94e046ae3ce6121c, []int{0}
+}
+func (m *IntOrString) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *IntOrString) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IntOrString.Merge(m, src)
+}
+func (m *IntOrString) XXX_Size() int {
+	return m.Size()
+}
+func (m *IntOrString) XXX_DiscardUnknown() {
+	xxx_messageInfo_IntOrString.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntOrString proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c)
+}
+
+var fileDescriptor_94e046ae3ce6121c = []byte{
+	// 292 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31,
+	0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d,
+	0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d,
+	0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a,
+	0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36,
+	0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda,
+	0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20,
+	0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e,
+	0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84,
+	0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0,
+	0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30,
+	0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f,
+	0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92,
+	0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3,
+	0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83,
+	0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35,
+	0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb,
+	0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9,
+	0x64, 0x01, 0x00, 0x00,
+}
+
+func (m *IntOrString) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.StrVal)
+	copy(dAtA[i:], m.StrVal)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal)))
+	i--
+	dAtA[i] = 0x1a
+	i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *IntOrString) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Type))
+	n += 1 + sovGenerated(uint64(m.IntVal))
+	l = len(m.StrVal)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *IntOrString) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: IntOrString: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= Type(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType)
+			}
+			m.IntVal = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.IntVal |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StrVal = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
new file mode 100644
index 0000000..e79fb9e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
@@ -0,0 +1,43 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.util.intstr;
+
+// Package-wide variables from generator "generated".
+option go_package = "intstr";
+
+// IntOrString is a type that can hold an int32 or a string.  When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type.  This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+message IntOrString {
+  optional int64 type = 1;
+
+  optional int32 intVal = 2;
+
+  optional string strVal = 3;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
new file mode 100644
index 0000000..6576def
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -0,0 +1,185 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package intstr
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"runtime/debug"
+	"strconv"
+	"strings"
+
+	"github.com/google/gofuzz"
+	"k8s.io/klog/v2"
+)
+
+// IntOrString is a type that can hold an int32 or a string.  When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type.  This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type IntOrString struct {
+	Type   Type   `protobuf:"varint,1,opt,name=type,casttype=Type"`
+	IntVal int32  `protobuf:"varint,2,opt,name=intVal"`
+	StrVal string `protobuf:"bytes,3,opt,name=strVal"`
+}
+
+// Type represents the stored type of IntOrString.
+type Type int64
+
+const (
+	Int    Type = iota // The IntOrString holds an int.
+	String             // The IntOrString holds a string.
+)
+
+// FromInt creates an IntOrString object with an int32 value. It is the
+// caller's responsibility not to call this method with a value that does
+// not fit in an int32; an overflow is logged but not treated as an error.
+// TODO: convert to (val int32)
+func FromInt(val int) IntOrString {
+	if val > math.MaxInt32 || val < math.MinInt32 {
+		klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
+	}
+	return IntOrString{Type: Int, IntVal: int32(val)}
+}
+
+// FromString creates an IntOrString object with a string value.
+func FromString(val string) IntOrString {
+	return IntOrString{Type: String, StrVal: val}
+}
+
+// Parse tries to convert the given string to an integer and, failing that,
+// stores it as a string value.
+func Parse(val string) IntOrString {
+	i, err := strconv.Atoi(val)
+	if err != nil {
+		return FromString(val)
+	}
+	return FromInt(i)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
+	if value[0] == '"' {
+		intstr.Type = String
+		return json.Unmarshal(value, &intstr.StrVal)
+	}
+	intstr.Type = Int
+	return json.Unmarshal(value, &intstr.IntVal)
+}
+
+// String returns the string value, or the Itoa of the int value.
+func (intstr *IntOrString) String() string {
+	if intstr.Type == String {
+		return intstr.StrVal
+	}
+	return strconv.Itoa(intstr.IntValue())
+}
+
+// IntValue returns the IntVal if type Int, or if
+// it is a String, will attempt a conversion to int,
+// returning 0 if a parsing error occurs.
+func (intstr *IntOrString) IntValue() int {
+	if intstr.Type == String {
+		i, _ := strconv.Atoi(intstr.StrVal)
+		return i
+	}
+	return int(intstr.IntVal)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (intstr IntOrString) MarshalJSON() ([]byte, error) {
+	switch intstr.Type {
+	case Int:
+		return json.Marshal(intstr.IntVal)
+	case String:
+		return json.Marshal(intstr.StrVal)
+	default:
+		return []byte{}, fmt.Errorf("impossible IntOrString.Type")
+	}
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+//
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators
+func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} }
+
+// OpenAPISchemaFormat is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" }
+
+func (intstr *IntOrString) Fuzz(c fuzz.Continue) {
+	if intstr == nil {
+		return
+	}
+	if c.RandBool() {
+		intstr.Type = Int
+		c.Fuzz(&intstr.IntVal)
+		intstr.StrVal = ""
+	} else {
+		intstr.Type = String
+		intstr.IntVal = 0
+		c.Fuzz(&intstr.StrVal)
+	}
+}
+
+func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString {
+	if intOrPercent == nil {
+		return &defaultValue
+	}
+	return intOrPercent
+}
+
+func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) {
+	if intOrPercent == nil {
+		return 0, errors.New("nil value for IntOrString")
+	}
+	value, isPercent, err := getIntOrPercentValue(intOrPercent)
+	if err != nil {
+		return 0, fmt.Errorf("invalid value for IntOrString: %v", err)
+	}
+	if isPercent {
+		if roundUp {
+			value = int(math.Ceil(float64(value) * (float64(total)) / 100))
+		} else {
+			value = int(math.Floor(float64(value) * (float64(total)) / 100))
+		}
+	}
+	return value, nil
+}
+
+func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) {
+	switch intOrStr.Type {
+	case Int:
+		return intOrStr.IntValue(), false, nil
+	case String:
+		s := strings.Replace(intOrStr.StrVal, "%", "", -1)
+		v, err := strconv.Atoi(s)
+		if err != nil {
+			return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err)
+		}
+		return int(v), true, nil
+	}
+	return 0, false, fmt.Errorf("invalid type: neither int nor percentage")
+}
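+
+// exampleIntOrPercent is an illustrative sketch (not part of the upstream
+// file): it contrasts a plain integer with a percentage string when both are
+// scaled against a total of 10, e.g. when computing a rollout budget. The
+// function name and values are hypothetical.
+func exampleIntOrPercent() (int, int, error) {
+	fixed := FromInt(3)     // stored as the int32 3
+	percent := Parse("25%") // "25%" does not parse as an integer, so it is stored as a string
+	a, err := GetValueFromIntOrPercent(&fixed, 10, true) // -> 3
+	if err != nil {
+		return 0, 0, err
+	}
+	b, err := GetValueFromIntOrPercent(&percent, 10, true) // ceil(10 * 25%) -> 3
+	if err != nil {
+		return 0, 0, err
+	}
+	return a, b, nil
+}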
diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
new file mode 100644
index 0000000..2048348
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// NewEncoder delegates to json.NewEncoder
+// It is only here so this package can be a drop-in for common encoding/json uses
+func NewEncoder(w io.Writer) *json.Encoder {
+	return json.NewEncoder(w)
+}
+
+// Marshal delegates to json.Marshal
+// It is only here so this package can be a drop-in for common encoding/json uses
+func Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// limit recursive depth to prevent stack overflow errors
+const maxDepth = 10000
+
+// Unmarshal unmarshals the given data
+// If v is a *map[string]interface{}, numbers are converted to int64 or float64
+func Unmarshal(data []byte, v interface{}) error {
+	switch v := v.(type) {
+	case *map[string]interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertMapNumbers(*v, 0)
+
+	case *[]interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertSliceNumbers(*v, 0)
+
+	case *interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertInterfaceNumbers(v, 0)
+
+	default:
+		return json.Unmarshal(data, v)
+	}
+}
+
+func convertInterfaceNumbers(v *interface{}, depth int) error {
+	var err error
+	switch v2 := (*v).(type) {
+	case json.Number:
+		*v, err = convertNumber(v2)
+	case map[string]interface{}:
+		err = convertMapNumbers(v2, depth+1)
+	case []interface{}:
+		err = convertSliceNumbers(v2, depth+1)
+	}
+	return err
+}
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertMapNumbers(m map[string]interface{}, depth int) error {
+	if depth > maxDepth {
+		return fmt.Errorf("exceeded max depth of %d", maxDepth)
+	}
+
+	var err error
+	for k, v := range m {
+		switch v := v.(type) {
+		case json.Number:
+			m[k], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v, depth+1)
+		case []interface{}:
+			err = convertSliceNumbers(v, depth+1)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertSliceNumbers(s []interface{}, depth int) error {
+	if depth > maxDepth {
+		return fmt.Errorf("exceeded max depth of %d", maxDepth)
+	}
+
+	var err error
+	for i, v := range s {
+		switch v := v.(type) {
+		case json.Number:
+			s[i], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v, depth+1)
+		case []interface{}:
+			err = convertSliceNumbers(v, depth+1)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+	// Attempt to convert to an int64 first
+	if i, err := n.Int64(); err == nil {
+		return i, nil
+	}
+	// Return a float64 (default json.Decode() behavior)
+	// An overflow will return an error
+	return n.Float64()
+}
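+
+// exampleNumberPreservingUnmarshal is an illustrative sketch (not part of the
+// upstream file): unlike plain encoding/json, Unmarshal into a
+// *map[string]interface{} keeps whole numbers as int64 instead of float64,
+// which avoids precision loss for large integers. The function name and JSON
+// document are hypothetical.
+func exampleNumberPreservingUnmarshal() (interface{}, interface{}, error) {
+	var obj map[string]interface{}
+	if err := Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &obj); err != nil {
+		return nil, nil, err
+	}
+	// obj["replicas"] is int64(3); obj["ratio"] is float64(0.5).
+	return obj["replicas"], obj["ratio"], nil
+}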
diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
new file mode 100644
index 0000000..d69bf32
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package naming
+
+import (
+	"fmt"
+	"regexp"
+	goruntime "runtime"
+	"runtime/debug"
+	"strconv"
+	"strings"
+)
+
+// GetNameFromCallsite walks back through the call stack until it finds a caller from outside of the ignoredPackages.
+// It returns a short path/filename:line string to aid in identifying this reflector when it starts logging.
+func GetNameFromCallsite(ignoredPackages ...string) string {
+	name := "????"
+	const maxStack = 10
+	for i := 1; i < maxStack; i++ {
+		_, file, line, ok := goruntime.Caller(i)
+		if !ok {
+			file, line, ok = extractStackCreator()
+			if !ok {
+				break
+			}
+			i += maxStack
+		}
+		if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) {
+			continue
+		}
+
+		file = trimPackagePrefix(file)
+		name = fmt.Sprintf("%s:%d", file, line)
+		break
+	}
+	return name
+}
+
+// hasPackage returns true if the file is in one of the ignored packages.
+func hasPackage(file string, ignoredPackages []string) bool {
+	for _, ignoredPackage := range ignoredPackages {
+		if strings.Contains(file, ignoredPackage) {
+			return true
+		}
+	}
+	return false
+}
+
+// trimPackagePrefix trims well-known leading path elements (vendor, src, pkg directories) off the front of a file path.
+func trimPackagePrefix(file string) string {
+	if l := strings.LastIndex(file, "/vendor/"); l >= 0 {
+		return file[l+len("/vendor/"):]
+	}
+	if l := strings.LastIndex(file, "/src/"); l >= 0 {
+		return file[l+5:]
+	}
+	if l := strings.LastIndex(file, "/pkg/"); l >= 0 {
+		return file[l+1:]
+	}
+	return file
+}
+
+var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`)
+
+// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false
+// if the creator cannot be located.
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440
+func extractStackCreator() (string, int, bool) {
+	stack := debug.Stack()
+	matches := stackCreator.FindStringSubmatch(string(stack))
+	if len(matches) != 4 {
+		return "", 0, false
+	}
+	line, err := strconv.Atoi(matches[3])
+	if err != nil {
+		return "", 0, false
+	}
+	return matches[2], line, true
+}
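+
+// exampleReflectorName is an illustrative sketch (not part of the upstream
+// file): a reflector-style component can derive its log name from whichever
+// caller sits outside the listed packages. The package paths passed here are
+// hypothetical examples.
+func exampleReflectorName() string {
+	// For a caller in pkg/controller/foo.go this yields something like
+	// "pkg/controller/foo.go:42".
+	return GetNameFromCallsite("k8s.io/apimachinery/pkg/util/naming", "k8s.io/client-go/tools/cache")
+}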
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
new file mode 100644
index 0000000..945886c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -0,0 +1,724 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"mime"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/net/http2"
+	"k8s.io/klog/v2"
+)
+
+// JoinPreservingTrailingSlash does a path.Join of the specified elements,
+// preserving any trailing slash on the last non-empty segment
+func JoinPreservingTrailingSlash(elem ...string) string {
+	// do the basic path join
+	result := path.Join(elem...)
+
+	// find the last non-empty segment
+	for i := len(elem) - 1; i >= 0; i-- {
+		if len(elem[i]) > 0 {
+			// if the last segment ended in a slash, ensure our result does as well
+			if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") {
+				result += "/"
+			}
+			break
+		}
+	}
+
+	return result
+}
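+
+// exampleJoin is an illustrative sketch (not part of the upstream file): it
+// shows the difference from a plain path.Join, which would drop the trailing
+// slash. The function name and paths are hypothetical.
+func exampleJoin() (string, string) {
+	withSlash := JoinPreservingTrailingSlash("/api/v1", "namespaces/") // "/api/v1/namespaces/"
+	plain := JoinPreservingTrailingSlash("/api/v1", "namespaces")      // "/api/v1/namespaces"
+	return withSlash, plain
+}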
+
+// IsTimeout returns true if the given error is a network timeout error
+func IsTimeout(err error) bool {
+	var neterr net.Error
+	if errors.As(err, &neterr) {
+		return neterr != nil && neterr.Timeout()
+	}
+	return false
+}
+
+// IsProbableEOF returns true if the given error resembles a connection termination
+// scenario that would justify assuming that the watch is empty.
+// These errors are what the Go http stack returns to us for general connection
+// closures (strongly correlated); callers that need to distinguish a probable
+// connection problem from a normal "this is disconnected" condition should use
+// this method.
+func IsProbableEOF(err error) bool {
+	if err == nil {
+		return false
+	}
+	var uerr *url.Error
+	if errors.As(err, &uerr) {
+		err = uerr.Err
+	}
+	msg := err.Error()
+	switch {
+	case err == io.EOF:
+		return true
+	case err == io.ErrUnexpectedEOF:
+		return true
+	case msg == "http: can't write HTTP request on broken connection":
+		return true
+	case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"):
+		return true
+	case strings.Contains(msg, "connection reset by peer"):
+		return true
+	case strings.Contains(strings.ToLower(msg), "use of closed network connection"):
+		return true
+	}
+	return false
+}
+
+var defaultTransport = http.DefaultTransport.(*http.Transport)
+
+// SetOldTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, DialContext, TLSHandshakeTimeout, and IdleConnTimeout fields if unset
+func SetOldTransportDefaults(t *http.Transport) *http.Transport {
+	if t.Proxy == nil || isDefault(t.Proxy) {
+		// http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+		// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+		t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+	}
+	// If no custom dialer is set, use the default context dialer
+	if t.DialContext == nil && t.Dial == nil {
+		t.DialContext = defaultTransport.DialContext
+	}
+	if t.TLSHandshakeTimeout == 0 {
+		t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
+	}
+	if t.IdleConnTimeout == 0 {
+		t.IdleConnTimeout = defaultTransport.IdleConnTimeout
+	}
+	return t
+}
+
+// SetTransportDefaults applies the defaults from http.DefaultTransport
+// for the Proxy, DialContext, TLSHandshakeTimeout, and IdleConnTimeout fields if unset,
+// and enables HTTP/2 unless it is explicitly disabled via the DISABLE_HTTP2 environment variable.
+func SetTransportDefaults(t *http.Transport) *http.Transport {
+	t = SetOldTransportDefaults(t)
+	// Allow clients to disable http2 if needed.
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
+		klog.Infof("HTTP2 has been explicitly disabled")
+	} else if allowsHTTP2(t) {
+		if err := http2.ConfigureTransport(t); err != nil {
+			klog.Warningf("Transport failed http2 configuration: %v", err)
+		}
+	}
+	return t
+}
+
+func allowsHTTP2(t *http.Transport) bool {
+	if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 {
+		// the transport expressed no NextProto preference, allow
+		return true
+	}
+	for _, p := range t.TLSClientConfig.NextProtos {
+		if p == http2.NextProtoTLS {
+			// the transport explicitly allowed http/2
+			return true
+		}
+	}
+	// the transport explicitly set NextProtos and excluded http/2
+	return false
+}
+
+type RoundTripperWrapper interface {
+	http.RoundTripper
+	WrappedRoundTripper() http.RoundTripper
+}
+
+type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error)
+
+func DialerFor(transport http.RoundTripper) (DialFunc, error) {
+	if transport == nil {
+		return nil, nil
+	}
+
+	switch transport := transport.(type) {
+	case *http.Transport:
+		// transport.DialContext takes precedence over transport.Dial
+		if transport.DialContext != nil {
+			return transport.DialContext, nil
+		}
+		// adapt transport.Dial to the DialWithContext signature
+		if transport.Dial != nil {
+			return func(ctx context.Context, net, addr string) (net.Conn, error) {
+				return transport.Dial(net, addr)
+			}, nil
+		}
+		// otherwise return nil
+		return nil, nil
+	case RoundTripperWrapper:
+		return DialerFor(transport.WrappedRoundTripper())
+	default:
+		return nil, fmt.Errorf("unknown transport type: %T", transport)
+	}
+}
+
+type TLSClientConfigHolder interface {
+	TLSClientConfig() *tls.Config
+}
+
+func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) {
+	if transport == nil {
+		return nil, nil
+	}
+
+	switch transport := transport.(type) {
+	case *http.Transport:
+		return transport.TLSClientConfig, nil
+	case TLSClientConfigHolder:
+		return transport.TLSClientConfig(), nil
+	case RoundTripperWrapper:
+		return TLSClientConfig(transport.WrappedRoundTripper())
+	default:
+		return nil, fmt.Errorf("unknown transport type: %T", transport)
+	}
+}
+
+func FormatURL(scheme string, host string, port int, path string) *url.URL {
+	return &url.URL{
+		Scheme: scheme,
+		Host:   net.JoinHostPort(host, strconv.Itoa(port)),
+		Path:   path,
+	}
+}
+
+func GetHTTPClient(req *http.Request) string {
+	if ua := req.UserAgent(); len(ua) != 0 {
+		return ua
+	}
+	return "unknown"
+}
+
+// SourceIPs splits the comma separated X-Forwarded-For header and joins it with
+// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs.
+// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain.
+// The req.RemoteAddr is always the last IP in the returned list.
+// It returns nil if all of these are empty or invalid.
+func SourceIPs(req *http.Request) []net.IP {
+	var srcIPs []net.IP
+
+	hdr := req.Header
+	// First check the X-Forwarded-For header for requests via proxy.
+	hdrForwardedFor := hdr.Get("X-Forwarded-For")
+	if hdrForwardedFor != "" {
+		// X-Forwarded-For can be a csv of IPs in case of multiple proxies.
+		// Use the first valid one.
+		parts := strings.Split(hdrForwardedFor, ",")
+		for _, part := range parts {
+			ip := net.ParseIP(strings.TrimSpace(part))
+			if ip != nil {
+				srcIPs = append(srcIPs, ip)
+			}
+		}
+	}
+
+	// Try the X-Real-Ip header.
+	hdrRealIp := hdr.Get("X-Real-Ip")
+	if hdrRealIp != "" {
+		ip := net.ParseIP(hdrRealIp)
+		// Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain.
+		if ip != nil && !containsIP(srcIPs, ip) {
+			srcIPs = append(srcIPs, ip)
+		}
+	}
+
+	// Always include the request Remote Address as it cannot be easily spoofed.
+	var remoteIP net.IP
+	// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
+	host, _, err := net.SplitHostPort(req.RemoteAddr)
+	if err == nil {
+		remoteIP = net.ParseIP(host)
+	}
+	// Fallback if Remote Address was just IP.
+	if remoteIP == nil {
+		remoteIP = net.ParseIP(req.RemoteAddr)
+	}
+
+	// Don't duplicate remote IP if it's already the last address in the chain.
+	if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) {
+		srcIPs = append(srcIPs, remoteIP)
+	}
+
+	return srcIPs
+}
+
+// Checks whether the given IP address is contained in the list of IPs.
+func containsIP(ips []net.IP, ip net.IP) bool {
+	for _, v := range ips {
+		if v.Equal(ip) {
+			return true
+		}
+	}
+	return false
+}
+
+// GetClientIP extracts and returns the client's IP from the given request.
+// It looks at the X-Forwarded-For header, the X-Real-Ip header and request.RemoteAddr, in that order.
+// It returns nil if none of them are set or all of them are set to invalid values.
+func GetClientIP(req *http.Request) net.IP {
+	ips := SourceIPs(req)
+	if len(ips) == 0 {
+		return nil
+	}
+	return ips[0]
+}
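+
+// exampleClientIP is an illustrative sketch (not part of the upstream file):
+// for a proxied request the X-Forwarded-For entries come first and the direct
+// peer (RemoteAddr) is always last, so GetClientIP returns the original client.
+// The addresses used are documentation examples.
+func exampleClientIP() net.IP {
+	req, _ := http.NewRequest("GET", "http://example.com/", nil)
+	req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.2")
+	req.RemoteAddr = "10.0.0.3:54321"
+	// SourceIPs(req) yields [203.0.113.7, 10.0.0.2, 10.0.0.3].
+	return GetClientIP(req) // 203.0.113.7
+}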
+
+// AppendForwardedForHeader prepares the X-Forwarded-For header for another forwarding hop by
+// appending the previous sender's IP address to the X-Forwarded-For chain.
+func AppendForwardedForHeader(req *http.Request) {
+	// Copied from net/http/httputil/reverseproxy.go:
+	if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+		// If we aren't the first proxy retain prior
+		// X-Forwarded-For information as a comma+space
+		// separated list and fold multiple headers into one.
+		if prior, ok := req.Header["X-Forwarded-For"]; ok {
+			clientIP = strings.Join(prior, ", ") + ", " + clientIP
+		}
+		req.Header.Set("X-Forwarded-For", clientIP)
+	}
+}
+
+var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment)
+
+// isDefault checks to see if the transportProxierFunc is pointing to the default one
+func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool {
+	transportProxierPointer := fmt.Sprintf("%p", transportProxier)
+	return transportProxierPointer == defaultProxyFuncPointer
+}
+
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+	// we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it
+	noProxyEnv := os.Getenv("NO_PROXY")
+	if noProxyEnv == "" {
+		noProxyEnv = os.Getenv("no_proxy")
+	}
+	noProxyRules := strings.Split(noProxyEnv, ",")
+
+	cidrs := []*net.IPNet{}
+	for _, noProxyRule := range noProxyRules {
+		_, cidr, _ := net.ParseCIDR(noProxyRule)
+		if cidr != nil {
+			cidrs = append(cidrs, cidr)
+		}
+	}
+
+	if len(cidrs) == 0 {
+		return delegate
+	}
+
+	return func(req *http.Request) (*url.URL, error) {
+		ip := net.ParseIP(req.URL.Hostname())
+		if ip == nil {
+			return delegate(req)
+		}
+
+		for _, cidr := range cidrs {
+			if cidr.Contains(ip) {
+				return nil, nil
+			}
+		}
+
+		return delegate(req)
+	}
+}
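+
+// exampleCIDRAwareProxier is an illustrative sketch (not part of the upstream
+// file): with NO_PROXY=10.0.0.0/8 in the environment, requests to addresses in
+// that range bypass the proxy (a nil URL is returned), while other requests
+// fall through to http.ProxyFromEnvironment. The function name is hypothetical.
+func exampleCIDRAwareProxier(req *http.Request) (*url.URL, error) {
+	proxier := NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+	return proxier(req)
+}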
+
+// DialerFunc implements Dialer for the provided function.
+type DialerFunc func(req *http.Request) (net.Conn, error)
+
+func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) {
+	return fn(req)
+}
+
+// Dialer dials a host and writes a request to it.
+type Dialer interface {
+	// Dial connects to the host specified by req's URL, writes the request to the connection, and
+	// returns the opened net.Conn.
+	Dial(req *http.Request) (net.Conn, error)
+}
+
+// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to
+// originalLocation). It returns the opened net.Conn and the raw response bytes.
+// If requireSameHostRedirects is true, only redirects to the same host are permitted.
+func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) {
+	const (
+		maxRedirects    = 9     // Fail on the 10th redirect
+		maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers
+	)
+
+	var (
+		location         = originalLocation
+		method           = originalMethod
+		intermediateConn net.Conn
+		rawResponse      = bytes.NewBuffer(make([]byte, 0, 256))
+		body             = originalBody
+	)
+
+	defer func() {
+		if intermediateConn != nil {
+			intermediateConn.Close()
+		}
+	}()
+
+redirectLoop:
+	for redirects := 0; ; redirects++ {
+		if redirects > maxRedirects {
+			return nil, nil, fmt.Errorf("too many redirects (%d)", redirects)
+		}
+
+		req, err := http.NewRequest(method, location.String(), body)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		req.Header = header
+
+		intermediateConn, err = dialer.Dial(req)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		// Peek at the backend response.
+		rawResponse.Reset()
+		respReader := bufio.NewReader(io.TeeReader(
+			io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes.
+			rawResponse)) // Save the raw response.
+		resp, err := http.ReadResponse(respReader, nil)
+		if err != nil {
+			// Unable to read the backend response; let the client handle it.
+			klog.Warningf("Error reading backend response: %v", err)
+			break redirectLoop
+		}
+
+		switch resp.StatusCode {
+		case http.StatusFound:
+			// Redirect, continue.
+		default:
+			// Don't redirect.
+			break redirectLoop
+		}
+
+		// Redirected requests switch to "GET" according to the HTTP spec:
+		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
+		method = "GET"
+		// don't send a body when following redirects
+		body = nil
+
+		resp.Body.Close() // not used
+
+		// Prepare to follow the redirect.
+		redirectStr := resp.Header.Get("Location")
+		if redirectStr == "" {
+			return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode)
+		}
+		// We have to parse relative to the current location, NOT originalLocation. For example,
+		// if we request http://foo.com/a and get back "http://bar.com/b", the result should be
+		// http://bar.com/b. If we then make that request and get back a redirect to "/c", the result
+		// should be http://bar.com/c, not http://foo.com/c.
+		location, err = location.Parse(redirectStr)
+		if err != nil {
+			return nil, nil, fmt.Errorf("malformed Location header: %v", err)
+		}
+
+		// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
+		if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
+			return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
+		}
+
+		// Reset the connection.
+		intermediateConn.Close()
+		intermediateConn = nil
+	}
+
+	connToReturn := intermediateConn
+	intermediateConn = nil // Don't close the connection when we return it.
+	return connToReturn, rawResponse.Bytes(), nil
+}
+
+// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers.
+func CloneRequest(req *http.Request) *http.Request {
+	r := new(http.Request)
+
+	// shallow clone
+	*r = *req
+
+	// deep copy headers
+	r.Header = CloneHeader(req.Header)
+
+	return r
+}
+
+// CloneHeader creates a deep copy of an http.Header.
+func CloneHeader(in http.Header) http.Header {
+	out := make(http.Header, len(in))
+	for key, values := range in {
+		newValues := make([]string, len(values))
+		copy(newValues, values)
+		out[key] = newValues
+	}
+	return out
+}
+
+// WarningHeader contains a single RFC2616 14.46 warning header
+type WarningHeader struct {
+	// Code indicates the type of warning. 299 is a miscellaneous persistent warning
+	Code int
+	// Agent contains the name or pseudonym of the server adding the Warning header.
+	// A single "-" is recommended when agent is unknown.
+	Agent string
+	// Warning text
+	Text string
+}
+
+// ParseWarningHeaders extracts RFC2616 14.46 warning headers from the specified set of header values.
+// Multiple comma-separated warnings per header are supported.
+// If an error is encountered in a header, the remainder of that header is skipped and subsequent headers are parsed.
+// It returns the successfully parsed warnings and any errors encountered.
+func ParseWarningHeaders(headers []string) ([]WarningHeader, []error) {
+	var (
+		results []WarningHeader
+		errs    []error
+	)
+	for _, header := range headers {
+		for len(header) > 0 {
+			result, remainder, err := ParseWarningHeader(header)
+			if err != nil {
+				errs = append(errs, err)
+				break
+			}
+			results = append(results, result)
+			header = remainder
+		}
+	}
+	return results, errs
+}
+
+var (
+	codeMatcher = regexp.MustCompile(`^[0-9]{3}$`)
+	wordDecoder = &mime.WordDecoder{}
+)
+
+// ParseWarningHeader extracts one RFC2616 14.46 warning from the specified header,
+// returning an error if the header does not contain a correctly formatted warning.
+// Any remaining content in the header is returned.
+func ParseWarningHeader(header string) (result WarningHeader, remainder string, err error) {
+	// https://tools.ietf.org/html/rfc2616#section-14.46
+	//   updated by
+	// https://tools.ietf.org/html/rfc7234#section-5.5
+	//   https://tools.ietf.org/html/rfc7234#appendix-A
+	//     Some requirements regarding production and processing of the Warning
+	//     header fields have been relaxed, as it is not widely implemented.
+	//     Furthermore, the Warning header field no longer uses RFC 2047
+	//     encoding, nor does it allow multiple languages, as these aspects were
+	//     not implemented.
+	//
+	// Format is one of:
+	// warn-code warn-agent "warn-text"
+	// warn-code warn-agent "warn-text" "warn-date"
+	//
+	// warn-code is a three digit number
+	// warn-agent is unquoted and contains no spaces
+	// warn-text is quoted with backslash escaping (RFC2047-encoded according to RFC2616, not encoded according to RFC7234)
+	// warn-date is optional, quoted, and in HTTP-date format (no embedded or escaped quotes)
+	//
+	// additional warnings can optionally be included in the same header by comma-separating them:
+	// warn-code warn-agent "warn-text" "warn-date"[, warn-code warn-agent "warn-text" "warn-date", ...]
+
+	// tolerate leading whitespace
+	header = strings.TrimSpace(header)
+
+	parts := strings.SplitN(header, " ", 3)
+	if len(parts) != 3 {
+		return WarningHeader{}, "", errors.New("invalid warning header: fewer than 3 segments")
+	}
+	code, agent, textDateRemainder := parts[0], parts[1], parts[2]
+
+	// verify code format
+	if !codeMatcher.Match([]byte(code)) {
+		return WarningHeader{}, "", errors.New("invalid warning header: code segment is not 3 digits between 100-299")
+	}
+	codeInt, _ := strconv.ParseInt(code, 10, 64)
+
+	// verify agent presence
+	if len(agent) == 0 {
+		return WarningHeader{}, "", errors.New("invalid warning header: empty agent segment")
+	}
+	if !utf8.ValidString(agent) || hasAnyRunes(agent, unicode.IsControl) {
+		return WarningHeader{}, "", errors.New("invalid warning header: invalid agent")
+	}
+
+	// verify textDateRemainder presence
+	if len(textDateRemainder) == 0 {
+		return WarningHeader{}, "", errors.New("invalid warning header: empty text segment")
+	}
+
+	// extract text
+	text, dateAndRemainder, err := parseQuotedString(textDateRemainder)
+	if err != nil {
+		return WarningHeader{}, "", fmt.Errorf("invalid warning header: %v", err)
+	}
+	// tolerate RFC2047-encoded text from warnings produced according to RFC2616
+	if decodedText, err := wordDecoder.DecodeHeader(text); err == nil {
+		text = decodedText
+	}
+	if !utf8.ValidString(text) || hasAnyRunes(text, unicode.IsControl) {
+		return WarningHeader{}, "", errors.New("invalid warning header: invalid text")
+	}
+	result = WarningHeader{Code: int(codeInt), Agent: agent, Text: text}
+
+	if len(dateAndRemainder) > 0 {
+		if dateAndRemainder[0] == '"' {
+			// consume date
+			foundEndQuote := false
+			for i := 1; i < len(dateAndRemainder); i++ {
+				if dateAndRemainder[i] == '"' {
+					foundEndQuote = true
+					remainder = strings.TrimSpace(dateAndRemainder[i+1:])
+					break
+				}
+			}
+			if !foundEndQuote {
+				return WarningHeader{}, "", errors.New("invalid warning header: unterminated date segment")
+			}
+		} else {
+			remainder = dateAndRemainder
+		}
+	}
+	if len(remainder) > 0 {
+		if remainder[0] == ',' {
+			// consume comma if present
+			remainder = strings.TrimSpace(remainder[1:])
+		} else {
+			return WarningHeader{}, "", errors.New("invalid warning header: unexpected token after warn-date")
+		}
+	}
+
+	return result, remainder, nil
+}
+
+func parseQuotedString(quotedString string) (string, string, error) {
+	if len(quotedString) == 0 {
+		return "", "", errors.New("invalid quoted string: 0-length")
+	}
+
+	if quotedString[0] != '"' {
+		return "", "", errors.New("invalid quoted string: missing initial quote")
+	}
+
+	quotedString = quotedString[1:]
+	var remainder string
+	escaping := false
+	closedQuote := false
+	result := &bytes.Buffer{}
+loop:
+	for i := 0; i < len(quotedString); i++ {
+		b := quotedString[i]
+		switch b {
+		case '"':
+			if escaping {
+				result.WriteByte(b)
+				escaping = false
+			} else {
+				closedQuote = true
+				remainder = strings.TrimSpace(quotedString[i+1:])
+				break loop
+			}
+		case '\\':
+			if escaping {
+				result.WriteByte(b)
+				escaping = false
+			} else {
+				escaping = true
+			}
+		default:
+			result.WriteByte(b)
+			escaping = false
+		}
+	}
+
+	if !closedQuote {
+		return "", "", errors.New("invalid quoted string: missing closing quote")
+	}
+	return result.String(), remainder, nil
+}
+
+func NewWarningHeader(code int, agent, text string) (string, error) {
+	if code < 0 || code > 999 {
+		return "", errors.New("code must be between 0 and 999")
+	}
+	if len(agent) == 0 {
+		agent = "-"
+	} else if !utf8.ValidString(agent) || strings.ContainsAny(agent, `\"`) || hasAnyRunes(agent, unicode.IsSpace, unicode.IsControl) {
+		return "", errors.New("agent must be valid UTF-8 and must not contain spaces, quotes, backslashes, or control characters")
+	}
+	if !utf8.ValidString(text) || hasAnyRunes(text, unicode.IsControl) {
+		return "", errors.New("text must be valid UTF-8 and must not contain control characters")
+	}
+	return fmt.Sprintf("%03d %s %s", code, agent, makeQuotedString(text)), nil
+}
+
+func hasAnyRunes(s string, runeCheckers ...func(rune) bool) bool {
+	for _, r := range s {
+		for _, checker := range runeCheckers {
+			if checker(r) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func makeQuotedString(s string) string {
+	result := &bytes.Buffer{}
+	// opening quote
+	result.WriteRune('"')
+	for _, c := range s {
+		switch c {
+		case '"', '\\':
+			// escape " and \
+			result.WriteRune('\\')
+			result.WriteRune(c)
+		default:
+			// write everything else as-is
+			result.WriteRune(c)
+		}
+	}
+	// closing quote
+	result.WriteRune('"')
+	return result.String()
+}
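+
+// exampleWarningRoundTrip is an illustrative sketch (not part of the upstream
+// file): NewWarningHeader and ParseWarningHeader round-trip a single warning
+// value. The code, agent, and text are hypothetical.
+func exampleWarningRoundTrip() (WarningHeader, error) {
+	value, err := NewWarningHeader(299, "", "this API is deprecated")
+	if err != nil {
+		return WarningHeader{}, err
+	}
+	// value is `299 - "this API is deprecated"`; the empty agent becomes "-".
+	parsed, _, err := ParseWarningHeader(value)
+	return parsed, err
+}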
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
new file mode 100644
index 0000000..204e223
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
@@ -0,0 +1,457 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"bufio"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"strings"
+
+	"k8s.io/klog/v2"
+)
+
+type AddressFamily uint
+
+const (
+	familyIPv4 AddressFamily = 4
+	familyIPv6 AddressFamily = 6
+)
+
+type AddressFamilyPreference []AddressFamily
+
+var (
+	preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6}
+	preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4}
+)
+
+const (
+	// LoopbackInterfaceName is the default name of the loopback interface
+	LoopbackInterfaceName = "lo"
+)
+
+const (
+	ipv4RouteFile = "/proc/net/route"
+	ipv6RouteFile = "/proc/net/ipv6_route"
+)
+
+type Route struct {
+	Interface   string
+	Destination net.IP
+	Gateway     net.IP
+	Family      AddressFamily
+}
+
+type RouteFile struct {
+	name  string
+	parse func(input io.Reader) ([]Route, error)
+}
+
+// noRoutesError can be returned in case of no routes
+type noRoutesError struct {
+	message string
+}
+
+func (e noRoutesError) Error() string {
+	return e.message
+}
+
+// IsNoRoutesError checks if an error is of type noRoutesError
+func IsNoRoutesError(err error) bool {
+	if err == nil {
+		return false
+	}
+	switch err.(type) {
+	case noRoutesError:
+		return true
+	default:
+		return false
+	}
+}
+
+var (
+	v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes}
+	v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes}
+)
+
+func (rf RouteFile) extract() ([]Route, error) {
+	file, err := os.Open(rf.name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return rf.parse(file)
+}
+
+// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes.
+func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) {
+	routes := []Route{}
+	scanner := bufio.NewReader(input)
+	for {
+		line, err := scanner.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		//ignore the headers in the route info
+		if strings.HasPrefix(line, "Iface") {
+			continue
+		}
+		fields := strings.Fields(line)
+		// Interested in fields:
+		//  0 - interface name
+		//  1 - destination address
+		//  2 - gateway
+		dest, err := parseIP(fields[1], familyIPv4)
+		if err != nil {
+			return nil, err
+		}
+		gw, err := parseIP(fields[2], familyIPv4)
+		if err != nil {
+			return nil, err
+		}
+		if !dest.Equal(net.IPv4zero) {
+			continue
+		}
+		routes = append(routes, Route{
+			Interface:   fields[0],
+			Destination: dest,
+			Gateway:     gw,
+			Family:      familyIPv4,
+		})
+	}
+	return routes, nil
+}
+
+func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) {
+	routes := []Route{}
+	scanner := bufio.NewReader(input)
+	for {
+		line, err := scanner.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		fields := strings.Fields(line)
+		// Interested in fields:
+		//  0 - destination address
+		//  4 - gateway
+		//  9 - interface name
+		dest, err := parseIP(fields[0], familyIPv6)
+		if err != nil {
+			return nil, err
+		}
+		gw, err := parseIP(fields[4], familyIPv6)
+		if err != nil {
+			return nil, err
+		}
+		if !dest.Equal(net.IPv6zero) {
+			continue
+		}
+		if gw.Equal(net.IPv6zero) {
+			continue // loopback
+		}
+		routes = append(routes, Route{
+			Interface:   fields[9],
+			Destination: dest,
+			Gateway:     gw,
+			Family:      familyIPv6,
+		})
+	}
+	return routes, nil
+}
+
+// parseIP takes the hex IP address string from route file and converts it
+// to a net.IP address. For IPv4, the value must be converted to big endian.
+func parseIP(str string, family AddressFamily) (net.IP, error) {
+	if str == "" {
+		return nil, fmt.Errorf("input is nil")
+	}
+	bytes, err := hex.DecodeString(str)
+	if err != nil {
+		return nil, err
+	}
+	if family == familyIPv4 {
+		if len(bytes) != net.IPv4len {
+			return nil, fmt.Errorf("invalid IPv4 address in route")
+		}
+		return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil
+	}
+	// Must be IPv6
+	if len(bytes) != net.IPv6len {
+		return nil, fmt.Errorf("invalid IPv6 address in route")
+	}
+	return net.IP(bytes), nil
+}
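+
+// exampleRouteIP is an illustrative sketch (not part of the upstream file):
+// /proc/net/route stores IPv4 addresses as little-endian hex, so the bytes
+// must be reversed to obtain the usual dotted form. The value shown is a
+// hypothetical example.
+func exampleRouteIP() (net.IP, error) {
+	// "0101A8C0" decodes to the bytes C0.A8.01.01 once reversed, i.e. 192.168.1.1.
+	return parseIP("0101A8C0", familyIPv4)
+}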
+
+func isInterfaceUp(intf *net.Interface) bool {
+	if intf == nil {
+		return false
+	}
+	if intf.Flags&net.FlagUp != 0 {
+		klog.V(4).Infof("Interface %v is up", intf.Name)
+		return true
+	}
+	return false
+}
+
+func isLoopbackOrPointToPoint(intf *net.Interface) bool {
+	return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0
+}
+
+// getMatchingGlobalIP returns the first valid global unicast address of the given
+// 'family' from the list of 'addrs'.
+func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) {
+	if len(addrs) > 0 {
+		for i := range addrs {
+			klog.V(4).Infof("Checking addr  %s.", addrs[i].String())
+			ip, _, err := net.ParseCIDR(addrs[i].String())
+			if err != nil {
+				return nil, err
+			}
+			if memberOf(ip, family) {
+				if ip.IsGlobalUnicast() {
+					klog.V(4).Infof("IP found %v", ip)
+					return ip, nil
+				} else {
+					klog.V(4).Infof("Non-global unicast address found %v", ip)
+				}
+			} else {
+				klog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
+			}
+
+		}
+	}
+	return nil, nil
+}
+
+// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The
+// interface must be up, the IP must in the family requested, and the IP must be a global unicast address.
+func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) {
+	intf, err := nw.InterfaceByName(intfName)
+	if err != nil {
+		return nil, err
+	}
+	if isInterfaceUp(intf) {
+		addrs, err := nw.Addrs(intf)
+		if err != nil {
+			return nil, err
+		}
+		klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
+		matchingIP, err := getMatchingGlobalIP(addrs, forFamily)
+		if err != nil {
+			return nil, err
+		}
+		if matchingIP != nil {
+			klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
+			return matchingIP, nil
+		}
+	}
+	return nil, nil
+}
+
+// memberOf tells if the IP is of the desired family. Used for checking interface addresses.
+func memberOf(ip net.IP, family AddressFamily) bool {
+	if ip.To4() != nil {
+		return family == familyIPv4
+	} else {
+		return family == familyIPv6
+	}
+}
+
+// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that
+// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP.
+// addressFamilies determines whether it prefers IPv4 or IPv6
+func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) {
+	intfs, err := nw.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+	if len(intfs) == 0 {
+		return nil, fmt.Errorf("no interfaces found on host.")
+	}
+	for _, family := range addressFamilies {
+		klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
+		for _, intf := range intfs {
+			if !isInterfaceUp(&intf) {
+				klog.V(4).Infof("Skipping: down interface %q", intf.Name)
+				continue
+			}
+			if isLoopbackOrPointToPoint(&intf) {
+				klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
+				continue
+			}
+			addrs, err := nw.Addrs(&intf)
+			if err != nil {
+				return nil, err
+			}
+			if len(addrs) == 0 {
+				klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
+				continue
+			}
+			for _, addr := range addrs {
+				ip, _, err := net.ParseCIDR(addr.String())
+				if err != nil {
+					return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
+				}
+				if !memberOf(ip, family) {
+					klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
+					continue
+				}
+				// TODO: Decide whether to allow IPv6 link-local addresses (LLAs) in the future.
+				if !ip.IsGlobalUnicast() {
+					klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
+					continue
+				}
+				klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
+				return ip, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("no acceptable interface with global unicast address found on host")
+}
+
+// ChooseHostInterface is a method used to fetch an IP for a daemon.
+// If there is no routing info file, it will choose a global IP from the system
+// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the
+// IP of the interface with a gateway on it (with priority given to IPv4). For a node
+// with no internet connection, it returns an error.
+func ChooseHostInterface() (net.IP, error) {
+	return chooseHostInterface(preferIPv4)
+}
+
+func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) {
+	var nw networkInterfacer = networkInterface{}
+	if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) {
+		return chooseIPFromHostInterfaces(nw, addressFamilies)
+	}
+	routes, err := getAllDefaultRoutes()
+	if err != nil {
+		return nil, err
+	}
+	return chooseHostInterfaceFromRoute(routes, nw, addressFamilies)
+}
+
+// networkInterfacer defines an interface for several net library functions. Production
+// code will forward to net library functions, and unit tests will override the methods
+// for testing purposes.
+type networkInterfacer interface {
+	InterfaceByName(intfName string) (*net.Interface, error)
+	Addrs(intf *net.Interface) ([]net.Addr, error)
+	Interfaces() ([]net.Interface, error)
+}
+
+// networkInterface implements the networkInterfacer interface for production code, just
+// wrapping the underlying net library function calls.
+type networkInterface struct{}
+
+func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) {
+	return net.InterfaceByName(intfName)
+}
+
+func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) {
+	return intf.Addrs()
+}
+
+func (_ networkInterface) Interfaces() ([]net.Interface, error) {
+	return net.Interfaces()
+}
+
+// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable
+// to read the IPv4 routing info file, we return an error. If unable to read the IPv6
+// routing info file (which is optional), we'll just use the IPv4 route information.
+// Using all the routing info, if no default routes are found, an error is returned.
+func getAllDefaultRoutes() ([]Route, error) {
+	routes, err := v4File.extract()
+	if err != nil {
+		return nil, err
+	}
+	v6Routes, _ := v6File.extract()
+	routes = append(routes, v6Routes...)
+	if len(routes) == 0 {
+		return nil, noRoutesError{
+			message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name),
+		}
+	}
+	return routes, nil
+}
+
+// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a
+// global IP address from the interface for the route. addressFamilies determines whether it
+// prefers IPv4 or IPv6
+func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) {
+	for _, family := range addressFamilies {
+		klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
+		for _, route := range routes {
+			if route.Family != family {
+				continue
+			}
+			klog.V(4).Infof("Default route transits interface %q", route.Interface)
+			finalIP, err := getIPFromInterface(route.Interface, family, nw)
+			if err != nil {
+				return nil, err
+			}
+			if finalIP != nil {
+				klog.V(4).Infof("Found active IP %v ", finalIP)
+				return finalIP, nil
+			}
+		}
+	}
+	klog.V(4).Infof("No active IP found by looking at default routes")
+	return nil, fmt.Errorf("unable to select an IP from default routes.")
+}
+
+// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress:
+// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface().
+// If bindAddress is unspecified or loopback, it returns the default IP of the same
+// address family as bindAddress.
+// Otherwise, it just returns bindAddress.
+func ResolveBindAddress(bindAddress net.IP) (net.IP, error) {
+	addressFamilies := preferIPv4
+	if bindAddress != nil && memberOf(bindAddress, familyIPv6) {
+		addressFamilies = preferIPv6
+	}
+
+	if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() {
+		hostIP, err := chooseHostInterface(addressFamilies)
+		if err != nil {
+			return nil, err
+		}
+		bindAddress = hostIP
+	}
+	return bindAddress, nil
+}
+
+// ChooseBindAddressForInterface chooses a global IP for a specific interface, with priority given to IPv4.
+// This is required in case of network setups where default routes are present, but network
+// interfaces use only link-local addresses (e.g. as described in RFC5549).
+// E.g. when using BGP to announce a host IP over link-local IP addresses and that IP address is attached to the lo interface.
+func ChooseBindAddressForInterface(intfName string) (net.IP, error) {
+	var nw networkInterfacer = networkInterface{}
+	for _, family := range preferIPv4 {
+		ip, err := getIPFromInterface(intfName, family, nw)
+		if err != nil {
+			return nil, err
+		}
+		if ip != nil {
+			return ip, nil
+		}
+	}
+	return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName)
+}
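
Not part of the vendored file: a minimal, assumed usage sketch of the public helpers above, showing how a daemon might derive its advertise/bind address. The utilnet alias and printed messages are illustrative.

package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// Pick the host's default global unicast IP, preferring IPv4 routes.
	ip, err := utilnet.ChooseHostInterface()
	if err != nil {
		fmt.Println("no suitable interface:", err)
		return
	}
	fmt.Println("default host IP:", ip)

	// An unspecified bind address (0.0.0.0) is resolved to a concrete host IP
	// of the same address family.
	bind, err := utilnet.ResolveBindAddress(net.ParseIP("0.0.0.0"))
	if err == nil {
		fmt.Println("resolved bind address:", bind)
	}
}
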
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
new file mode 100644
index 0000000..7b6eca8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// PortRange represents a range of TCP/UDP ports.  To represent a single port,
+// set Size to 1.
+type PortRange struct {
+	Base int
+	Size int
+}
+
+// Contains tests whether a given port falls within the PortRange.
+func (pr *PortRange) Contains(p int) bool {
+	return (p >= pr.Base) && ((p - pr.Base) < pr.Size)
+}
+
+// String converts the PortRange to a string representation, which can be
+// parsed by PortRange.Set or ParsePortRange.
+func (pr PortRange) String() string {
+	if pr.Size == 0 {
+		return ""
+	}
+	return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1)
+}
+
+// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and
+// sets the PortRange from it.  This is part of the flag.Value and pflag.Value
+// interfaces.
+func (pr *PortRange) Set(value string) error {
+	const (
+		SinglePortNotation = 1 << iota
+		HyphenNotation
+		PlusNotation
+	)
+
+	value = strings.TrimSpace(value)
+	hyphenIndex := strings.Index(value, "-")
+	plusIndex := strings.Index(value, "+")
+
+	if value == "" {
+		pr.Base = 0
+		pr.Size = 0
+		return nil
+	}
+
+	var err error
+	var low, high int
+	var notation int
+
+	if plusIndex == -1 && hyphenIndex == -1 {
+		notation |= SinglePortNotation
+	}
+	if hyphenIndex != -1 {
+		notation |= HyphenNotation
+	}
+	if plusIndex != -1 {
+		notation |= PlusNotation
+	}
+
+	switch notation {
+	case SinglePortNotation:
+		var port int
+		port, err = strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		low = port
+		high = port
+	case HyphenNotation:
+		low, err = strconv.Atoi(value[:hyphenIndex])
+		if err != nil {
+			return err
+		}
+		high, err = strconv.Atoi(value[hyphenIndex+1:])
+		if err != nil {
+			return err
+		}
+	case PlusNotation:
+		var offset int
+		low, err = strconv.Atoi(value[:plusIndex])
+		if err != nil {
+			return err
+		}
+		offset, err = strconv.Atoi(value[plusIndex+1:])
+		if err != nil {
+			return err
+		}
+		high = low + offset
+	default:
+		return fmt.Errorf("unable to parse port range: %s", value)
+	}
+
+	if low > 65535 || high > 65535 {
+		return fmt.Errorf("the port range cannot be greater than 65535: %s", value)
+	}
+
+	if high < low {
+		return fmt.Errorf("end port cannot be less than start port: %s", value)
+	}
+
+	pr.Base = low
+	pr.Size = 1 + high - low
+	return nil
+}
+
+// Type returns a descriptive string about this type.  This is part of the
+// pflag.Value interface.
+func (*PortRange) Type() string {
+	return "portRange"
+}
+
+// ParsePortRange parses a string of the form "min-max", inclusive at both
+// ends, and initializes a new PortRange from it.
+func ParsePortRange(value string) (*PortRange, error) {
+	pr := &PortRange{}
+	err := pr.Set(value)
+	if err != nil {
+		return nil, err
+	}
+	return pr, nil
+}
+
+// ParsePortRangeOrDie parses a string of the form "min-max" and panics if parsing fails.
+func ParsePortRangeOrDie(value string) *PortRange {
+	pr, err := ParsePortRange(value)
+	if err != nil {
+		panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err))
+	}
+	return pr
+}
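
Not part of the vendored file: an assumed sketch of the three notations accepted by PortRange.Set / ParsePortRange — a single port, an inclusive "min-max" range, and a "base+offset" range. The utilnet alias is illustrative.

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// "8080" is a single port; the other two describe the same inclusive range 30000-32767.
	for _, v := range []string{"8080", "30000-32767", "30000+2767"} {
		pr, err := utilnet.ParsePortRange(v)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("%s -> base=%d size=%d contains(30001)=%v\n", v, pr.Base, pr.Size, pr.Contains(30001))
	}
}
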
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
new file mode 100644
index 0000000..c0fd4e2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+var validSchemes = sets.NewString("http", "https", "")
+
+// SplitSchemeNamePort takes a string of the following forms:
+//  * "<name>",                 returns "",        "<name>","",      true
+//  * "<name>:<port>",          returns "",        "<name>","<port>",true
+//  * "<scheme>:<name>:<port>", returns "<scheme>","<name>","<port>",true
+//
+// Name must be non-empty, or valid will be returned as false.
+// Scheme must be "http" or "https" if specified
+// Port is returned as a string, and it is not required to be numeric (could be
+// used for a named port, for example).
+func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) {
+	parts := strings.Split(id, ":")
+	switch len(parts) {
+	case 1:
+		name = parts[0]
+	case 2:
+		name = parts[0]
+		port = parts[1]
+	case 3:
+		scheme = parts[0]
+		name = parts[1]
+		port = parts[2]
+	default:
+		return "", "", "", false
+	}
+
+	if len(name) > 0 && validSchemes.Has(scheme) {
+		return scheme, name, port, true
+	} else {
+		return "", "", "", false
+	}
+}
+
+// JoinSchemeNamePort returns a string that specifies the scheme, name, and port:
+//  * "<name>"
+//  * "<name>:<port>"
+//  * "<scheme>:<name>:<port>"
+// None of the parameters may contain a ':' character
+// Name is required
+// Scheme must be "", "http", or "https"
+func JoinSchemeNamePort(scheme, name, port string) string {
+	if len(scheme) > 0 {
+		// Must include three segments to specify scheme
+		return scheme + ":" + name + ":" + port
+	}
+	if len(port) > 0 {
+		// Must include two segments to specify port
+		return name + ":" + port
+	}
+	// Return name alone
+	return name
+}
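
Not part of the vendored file: an assumed round-trip sketch for SplitSchemeNamePort and JoinSchemeNamePort; the "https:etcd:2379" identifier is made up for illustration.

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:etcd:2379")
	fmt.Println(scheme, name, port, valid) // https etcd 2379 true

	// Joining the parts back yields the original identifier.
	fmt.Println(utilnet.JoinSchemeNamePort(scheme, name, port)) // https:etcd:2379
}
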
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
new file mode 100644
index 0000000..5950087
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"errors"
+	"net"
+	"reflect"
+	"syscall"
+)
+
+// IPNetEqual checks if the two input IPNets are representing the same subnet.
+// For example,
+//	10.0.0.1/24 and 10.0.0.0/24 are the same subnet.
+//	10.0.0.1/24 and 10.0.0.0/25 are not the same subnet.
+func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool {
+	if ipnet1 == nil || ipnet2 == nil {
+		return false
+	}
+	if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) {
+		return true
+	}
+	return false
+}
+
+// Returns if the given err is "connection reset by peer" error.
+func IsConnectionReset(err error) bool {
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		return errno == syscall.ECONNRESET
+	}
+	return false
+}
+
+// Returns if the given err is "connection refused" error
+func IsConnectionRefused(err error) bool {
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		return errno == syscall.ECONNREFUSED
+	}
+	return false
+}
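
Not part of the vendored file: an assumed sketch of IPNetEqual, which treats two IPNets as the same subnet when the masks match and each contains the other's IP.

package main

import (
	"fmt"
	"net"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	_, a, _ := net.ParseCIDR("10.0.0.1/24")
	_, b, _ := net.ParseCIDR("10.0.0.0/24")
	_, c, _ := net.ParseCIDR("10.0.0.0/25")

	fmt.Println(utilnet.IPNetEqual(a, b)) // true: same /24 subnet
	fmt.Println(utilnet.IPNetEqual(a, c)) // false: different mask lengths
}
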
diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
new file mode 100644
index 0000000..e8a9f60
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package runtime
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var (
+	// ReallyCrash controls the behavior of HandleCrash and now defaults
+	// true. It's still exposed so components can optionally set to false
+	// to restore prior behavior.
+	ReallyCrash = true
+)
+
+// PanicHandlers is a list of functions which will be invoked when a panic happens.
+var PanicHandlers = []func(interface{}){logPanic}
+
+// HandleCrash simply catches a crash and logs an error. Meant to be called via
+// defer.  Additional context-specific handlers can be provided, and will be
+// called in case of panic.  HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+func HandleCrash(additionalHandlers ...func(interface{})) {
+	if r := recover(); r != nil {
+		for _, fn := range PanicHandlers {
+			fn(r)
+		}
+		for _, fn := range additionalHandlers {
+			fn(r)
+		}
+		if ReallyCrash {
+			// Actually proceed to panic.
+			panic(r)
+		}
+	}
+}
+
+// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler).
+func logPanic(r interface{}) {
+	if r == http.ErrAbortHandler {
+		// honor the http.ErrAbortHandler sentinel panic value:
+		//   ErrAbortHandler is a sentinel panic value to abort a handler.
+		//   While any panic from ServeHTTP aborts the response to the client,
+		//   panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log.
+		return
+	}
+
+	// Same as stdlib http server code. Manually allocate stack trace buffer size
+	// to prevent excessively large logs
+	const size = 64 << 10
+	stacktrace := make([]byte, size)
+	stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
+	if _, ok := r.(string); ok {
+		klog.Errorf("Observed a panic: %s\n%s", r, stacktrace)
+	} else {
+		klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
+	}
+}
+
+// ErrorHandlers is a list of functions which will be invoked when an unreturnable
+// error occurs.
+// TODO(lavalamp): for testability, this and the below HandleError function
+// should be packaged up into a testable and reusable object.
+var ErrorHandlers = []func(error){
+	logError,
+	(&rudimentaryErrorBackoff{
+		lastErrorTime: time.Now(),
+		// 1ms was the number folks were able to stomach as a global rate limit.
+		// If you need to log errors more than 1000 times a second you
+		// should probably consider fixing your code instead. :)
+		minPeriod: time.Millisecond,
+	}).OnError,
+}
+
+// HandleError is a method to invoke when a non-user facing piece of code cannot
+// return an error and needs to indicate it has been ignored. Invoking this method
+// is preferable to logging the error - the default behavior is to log but the
+// errors may be sent to a remote server for analysis.
+func HandleError(err error) {
+	// this is sometimes called with a nil error.  We probably shouldn't fail and should do nothing instead
+	if err == nil {
+		return
+	}
+
+	for _, fn := range ErrorHandlers {
+		fn(err)
+	}
+}
+
+// logError prints an error with the call stack of the location it was reported
+func logError(err error) {
+	klog.ErrorDepth(2, err)
+}
+
+type rudimentaryErrorBackoff struct {
+	minPeriod time.Duration // immutable
+	// TODO(lavalamp): use the clock for testability. Need to move that
+	// package for that to be accessible here.
+	lastErrorTimeLock sync.Mutex
+	lastErrorTime     time.Time
+}
+
+// OnError will block if it is called more often than the embedded period time.
+// This will prevent overly tight hot error loops.
+func (r *rudimentaryErrorBackoff) OnError(error) {
+	r.lastErrorTimeLock.Lock()
+	defer r.lastErrorTimeLock.Unlock()
+	d := time.Since(r.lastErrorTime)
+	if d < r.minPeriod {
+		// Sleep for the remainder of the period to rate-limit hot error loops.
+		time.Sleep(r.minPeriod - d)
+	}
+	r.lastErrorTime = time.Now()
+}
+
+// GetCaller returns the caller of the function that calls it.
+func GetCaller() string {
+	var pc [1]uintptr
+	runtime.Callers(3, pc[:])
+	f := runtime.FuncForPC(pc[0])
+	if f == nil {
+		return fmt.Sprintf("Unable to find caller")
+	}
+	return f.Name()
+}
+
+// RecoverFromPanic replaces the specified error with an error containing the
+// original error, and the call tree when a panic occurs. This enables error
+// handlers to handle errors and panics the same way.
+func RecoverFromPanic(err *error) {
+	if r := recover(); r != nil {
+		// Same as stdlib http server code. Manually allocate stack trace buffer size
+		// to prevent excessively large logs
+		const size = 64 << 10
+		stacktrace := make([]byte, size)
+		stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
+
+		*err = fmt.Errorf(
+			"recovered from panic %q. (err=%v) Call stack:\n%s",
+			r,
+			*err,
+			stacktrace)
+	}
+}
+
+// Must panics on non-nil errors.  Useful for handling programmer-level errors.
+func Must(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
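
Not part of the vendored file: an assumed sketch of the intended call pattern — HandleCrash is deferred so a goroutine's panic is logged by PanicHandlers (and re-raised while ReallyCrash is true), and HandleError rate-limits logging of errors that cannot be returned to a caller.

package main

import (
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func worker() {
	// Log (and by default re-raise) any panic raised below.
	defer utilruntime.HandleCrash()

	// Report an error that has no caller to return to; logging is throttled to
	// at most one message per millisecond by the default ErrorHandlers.
	utilruntime.HandleError(fmt.Errorf("transient problem that is only logged"))
}

func main() {
	worker()
}
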
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
new file mode 100644
index 0000000..9bfa85d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
+type Byte map[byte]Empty
+
+// NewByte creates a Byte from a list of values.
+func NewByte(items ...byte) Byte {
+	ss := Byte{}
+	ss.Insert(items...)
+	return ss
+}
+
+// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func ByteKeySet(theMap interface{}) Byte {
+	v := reflect.ValueOf(theMap)
+	ret := Byte{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(byte))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Byte) Insert(items ...byte) Byte {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+	return s
+}
+
+// Delete removes all items from the set.
+func (s Byte) Delete(items ...byte) Byte {
+	for _, item := range items {
+		delete(s, item)
+	}
+	return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Byte) Has(item byte) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Byte) HasAll(items ...byte) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Byte) HasAny(items ...byte) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Byte) Difference(s2 Byte) Byte {
+	result := NewByte()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Byte) Union(s2 Byte) Byte {
+	result := NewByte()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Byte) Intersection(s2 Byte) Byte {
+	var walk, other Byte
+	result := NewByte()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Byte) IsSuperset(s2 Byte) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Byte) Equal(s2 Byte) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfByte []byte
+
+func (s sortableSliceOfByte) Len() int           { return len(s) }
+func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) }
+func (s sortableSliceOfByte) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted byte slice.
+func (s Byte) List() []byte {
+	res := make(sortableSliceOfByte, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []byte(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Byte) UnsortedList() []byte {
+	res := make([]byte, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// Returns a single element from the set.
+func (s Byte) PopAny() (byte, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue byte
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Byte) Len() int {
+	return len(s)
+}
+
+func lessByte(lhs, rhs byte) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
new file mode 100644
index 0000000..b152a0b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+// Package sets has auto-generated set types.
+package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
new file mode 100644
index 0000000..e11e622
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
@@ -0,0 +1,23 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+// Empty is public since it is used by some internal API objects for conversions between external
+// string arrays and internal sets, and conversion logic requires public types today.
+type Empty struct{}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
new file mode 100644
index 0000000..88bd709
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
+type Int map[int]Empty
+
+// NewInt creates a Int from a list of values.
+func NewInt(items ...int) Int {
+	ss := Int{}
+	ss.Insert(items...)
+	return ss
+}
+
+// IntKeySet creates a Int from a keys of a map[int](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func IntKeySet(theMap interface{}) Int {
+	v := reflect.ValueOf(theMap)
+	ret := Int{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(int))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Int) Insert(items ...int) Int {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+	return s
+}
+
+// Delete removes all items from the set.
+func (s Int) Delete(items ...int) Int {
+	for _, item := range items {
+		delete(s, item)
+	}
+	return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int) Has(item int) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int) HasAll(items ...int) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int) HasAny(items ...int) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int) Difference(s2 Int) Int {
+	result := NewInt()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int) Union(s2 Int) Int {
+	result := NewInt()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int) Intersection(s2 Int) Int {
+	var walk, other Int
+	result := NewInt()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int) IsSuperset(s2 Int) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int) Equal(s2 Int) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt []int
+
+func (s sortableSliceOfInt) Len() int           { return len(s) }
+func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) }
+func (s sortableSliceOfInt) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int slice.
+func (s Int) List() []int {
+	res := make(sortableSliceOfInt, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []int(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int) UnsortedList() []int {
+	res := make([]int, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// Returns a single element from the set.
+func (s Int) PopAny() (int, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue int
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int) Len() int {
+	return len(s)
+}
+
+func lessInt(lhs, rhs int) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
new file mode 100644
index 0000000..96a4855
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption.
+type Int32 map[int32]Empty
+
+// NewInt32 creates a Int32 from a list of values.
+func NewInt32(items ...int32) Int32 {
+	ss := Int32{}
+	ss.Insert(items...)
+	return ss
+}
+
+// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int32KeySet(theMap interface{}) Int32 {
+	v := reflect.ValueOf(theMap)
+	ret := Int32{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(int32))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Int32) Insert(items ...int32) Int32 {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+	return s
+}
+
+// Delete removes all items from the set.
+func (s Int32) Delete(items ...int32) Int32 {
+	for _, item := range items {
+		delete(s, item)
+	}
+	return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int32) Has(item int32) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int32) HasAll(items ...int32) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int32) HasAny(items ...int32) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int32) Difference(s2 Int32) Int32 {
+	result := NewInt32()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int32) Union(s2 Int32) Int32 {
+	result := NewInt32()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int32) Intersection(s2 Int32) Int32 {
+	var walk, other Int32
+	result := NewInt32()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int32) IsSuperset(s2 Int32) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int32) Equal(s2 Int32) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt32 []int32
+
+func (s sortableSliceOfInt32) Len() int           { return len(s) }
+func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) }
+func (s sortableSliceOfInt32) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int32 slice.
+func (s Int32) List() []int32 {
+	res := make(sortableSliceOfInt32, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []int32(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int32) UnsortedList() []int32 {
+	res := make([]int32, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// Returns a single element from the set.
+func (s Int32) PopAny() (int32, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue int32
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int32) Len() int {
+	return len(s)
+}
+
+func lessInt32(lhs, rhs int32) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
new file mode 100644
index 0000000..b375a1b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
+type Int64 map[int64]Empty
+
+// NewInt64 creates a Int64 from a list of values.
+func NewInt64(items ...int64) Int64 {
+	ss := Int64{}
+	ss.Insert(items...)
+	return ss
+}
+
+// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func Int64KeySet(theMap interface{}) Int64 {
+	v := reflect.ValueOf(theMap)
+	ret := Int64{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(int64))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s Int64) Insert(items ...int64) Int64 {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+	return s
+}
+
+// Delete removes all items from the set.
+func (s Int64) Delete(items ...int64) Int64 {
+	for _, item := range items {
+		delete(s, item)
+	}
+	return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s Int64) Has(item int64) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s Int64) HasAll(items ...int64) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s Int64) HasAny(items ...int64) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s Int64) Difference(s2 Int64) Int64 {
+	result := NewInt64()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 Int64) Union(s2 Int64) Int64 {
+	result := NewInt64()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 Int64) Intersection(s2 Int64) Int64 {
+	var walk, other Int64
+	result := NewInt64()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 Int64) IsSuperset(s2 Int64) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 Int64) Equal(s2 Int64) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfInt64 []int64
+
+func (s sortableSliceOfInt64) Len() int           { return len(s) }
+func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
+func (s sortableSliceOfInt64) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted int64 slice.
+func (s Int64) List() []int64 {
+	res := make(sortableSliceOfInt64, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []int64(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s Int64) UnsortedList() []int64 {
+	res := make([]int64, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// Returns a single element from the set.
+func (s Int64) PopAny() (int64, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue int64
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s Int64) Len() int {
+	return len(s)
+}
+
+func lessInt64(lhs, rhs int64) bool {
+	return lhs < rhs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
new file mode 100644
index 0000000..e6f37db
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
@@ -0,0 +1,205 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by set-gen. DO NOT EDIT.
+
+package sets
+
+import (
+	"reflect"
+	"sort"
+)
+
+// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
+type String map[string]Empty
+
+// NewString creates a String from a list of values.
+func NewString(items ...string) String {
+	ss := String{}
+	ss.Insert(items...)
+	return ss
+}
+
+// StringKeySet creates a String from a keys of a map[string](? extends interface{}).
+// If the value passed in is not actually a map, this will panic.
+func StringKeySet(theMap interface{}) String {
+	v := reflect.ValueOf(theMap)
+	ret := String{}
+
+	for _, keyValue := range v.MapKeys() {
+		ret.Insert(keyValue.Interface().(string))
+	}
+	return ret
+}
+
+// Insert adds items to the set.
+func (s String) Insert(items ...string) String {
+	for _, item := range items {
+		s[item] = Empty{}
+	}
+	return s
+}
+
+// Delete removes all items from the set.
+func (s String) Delete(items ...string) String {
+	for _, item := range items {
+		delete(s, item)
+	}
+	return s
+}
+
+// Has returns true if and only if item is contained in the set.
+func (s String) Has(item string) bool {
+	_, contained := s[item]
+	return contained
+}
+
+// HasAll returns true if and only if all items are contained in the set.
+func (s String) HasAll(items ...string) bool {
+	for _, item := range items {
+		if !s.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// HasAny returns true if any items are contained in the set.
+func (s String) HasAny(items ...string) bool {
+	for _, item := range items {
+		if s.Has(item) {
+			return true
+		}
+	}
+	return false
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s String) Difference(s2 String) String {
+	result := NewString()
+	for key := range s {
+		if !s2.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// Union returns a new set which includes items in either s1 or s2.
+// For example:
+// s1 = {a1, a2}
+// s2 = {a3, a4}
+// s1.Union(s2) = {a1, a2, a3, a4}
+// s2.Union(s1) = {a1, a2, a3, a4}
+func (s1 String) Union(s2 String) String {
+	result := NewString()
+	for key := range s1 {
+		result.Insert(key)
+	}
+	for key := range s2 {
+		result.Insert(key)
+	}
+	return result
+}
+
+// Intersection returns a new set which includes the item in BOTH s1 and s2
+// For example:
+// s1 = {a1, a2}
+// s2 = {a2, a3}
+// s1.Intersection(s2) = {a2}
+func (s1 String) Intersection(s2 String) String {
+	var walk, other String
+	result := NewString()
+	if s1.Len() < s2.Len() {
+		walk = s1
+		other = s2
+	} else {
+		walk = s2
+		other = s1
+	}
+	for key := range walk {
+		if other.Has(key) {
+			result.Insert(key)
+		}
+	}
+	return result
+}
+
+// IsSuperset returns true if and only if s1 is a superset of s2.
+func (s1 String) IsSuperset(s2 String) bool {
+	for item := range s2 {
+		if !s1.Has(item) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal returns true if and only if s1 is equal (as a set) to s2.
+// Two sets are equal if their membership is identical.
+// (In practice, this means same elements, order doesn't matter)
+func (s1 String) Equal(s2 String) bool {
+	return len(s1) == len(s2) && s1.IsSuperset(s2)
+}
+
+type sortableSliceOfString []string
+
+func (s sortableSliceOfString) Len() int           { return len(s) }
+func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) }
+func (s sortableSliceOfString) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// List returns the contents as a sorted string slice.
+func (s String) List() []string {
+	res := make(sortableSliceOfString, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	sort.Sort(res)
+	return []string(res)
+}
+
+// UnsortedList returns the slice with contents in random order.
+func (s String) UnsortedList() []string {
+	res := make([]string, 0, len(s))
+	for key := range s {
+		res = append(res, key)
+	}
+	return res
+}
+
+// Returns a single element from the set.
+func (s String) PopAny() (string, bool) {
+	for key := range s {
+		s.Delete(key)
+		return key, true
+	}
+	var zeroValue string
+	return zeroValue, false
+}
+
+// Len returns the size of the set.
+func (s String) Len() int {
+	return len(s)
+}
+
+func lessString(lhs, rhs string) bool {
+	return lhs < rhs
+}
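
Not part of the vendored file: an assumed usage sketch of sets.String; the other generated set types (Byte, Int, Int32, Int64) expose the same methods.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	want := sets.NewString("a", "b", "c")
	have := sets.NewString("b", "c", "d")

	fmt.Println(want.Difference(have).List())   // [a]
	fmt.Println(want.Intersection(have).List()) // [b c]
	fmt.Println(want.Union(have).List())        // [a b c d]
	fmt.Println(have.Has("d"), have.HasAll("b", "z")) // true false
}
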
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
new file mode 100644
index 0000000..0cd5d65
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -0,0 +1,272 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Error is an implementation of the 'error' interface, which represents a
+// field-level validation error.
+type Error struct {
+	Type     ErrorType
+	Field    string
+	BadValue interface{}
+	Detail   string
+}
+
+var _ error = &Error{}
+
+// Error implements the error interface.
+func (v *Error) Error() string {
+	return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+}
+
+// ErrorBody returns the error message without the field name.  This is useful
+// for building nice-looking higher-level error reporting.
+func (v *Error) ErrorBody() string {
+	var s string
+	switch v.Type {
+	case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal:
+		s = v.Type.String()
+	default:
+		value := v.BadValue
+		valueType := reflect.TypeOf(value)
+		if value == nil || valueType == nil {
+			value = "null"
+		} else if valueType.Kind() == reflect.Ptr {
+			if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() {
+				value = "null"
+			} else {
+				value = reflectValue.Elem().Interface()
+			}
+		}
+		switch t := value.(type) {
+		case int64, int32, float64, float32, bool:
+			// use simple printer for simple types
+			s = fmt.Sprintf("%s: %v", v.Type, value)
+		case string:
+			s = fmt.Sprintf("%s: %q", v.Type, t)
+		case fmt.Stringer:
+			// anything that defines String() is better than raw struct
+			s = fmt.Sprintf("%s: %s", v.Type, t.String())
+		default:
+			// fallback to raw struct
+			// TODO: internal types have panic guards against json.Marshalling to prevent
+			// accidental use of internal types in external serialized form.  For now, use
+			// %#v, although it would be better to show a more expressive output in the future
+			s = fmt.Sprintf("%s: %#v", v.Type, value)
+		}
+	}
+	if len(v.Detail) != 0 {
+		s += fmt.Sprintf(": %s", v.Detail)
+	}
+	return s
+}
+
+// ErrorType is a machine readable value providing more detail about why
+// a field is invalid.  These values are expected to match 1-1 with
+// CauseType in api/types.go.
+type ErrorType string
+
+// TODO: These values are duplicated in api/types.go, but there's a circular dep.  Fix it.
+const (
+	// ErrorTypeNotFound is used to report failure to find a requested value
+	// (e.g. looking up an ID).  See NotFound().
+	ErrorTypeNotFound ErrorType = "FieldValueNotFound"
+	// ErrorTypeRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).  See
+	// Required().
+	ErrorTypeRequired ErrorType = "FieldValueRequired"
+	// ErrorTypeDuplicate is used to report collisions of values that must be
+	// unique (e.g. unique IDs).  See Duplicate().
+	ErrorTypeDuplicate ErrorType = "FieldValueDuplicate"
+	// ErrorTypeInvalid is used to report malformed values (e.g. failed regex
+	// match, too long, out of bounds).  See Invalid().
+	ErrorTypeInvalid ErrorType = "FieldValueInvalid"
+	// ErrorTypeNotSupported is used to report unknown values for enumerated
+	// fields (e.g. a list of valid values).  See NotSupported().
+	ErrorTypeNotSupported ErrorType = "FieldValueNotSupported"
+	// ErrorTypeForbidden is used to report valid (as per formatting rules)
+	// values which would be accepted under some conditions, but which are not
+	// permitted by the current conditions (such as security policy).  See
+	// Forbidden().
+	ErrorTypeForbidden ErrorType = "FieldValueForbidden"
+	// ErrorTypeTooLong is used to report that the given value is too long.
+	// This is similar to ErrorTypeInvalid, but the error will not include the
+	// too-long value.  See TooLong().
+	ErrorTypeTooLong ErrorType = "FieldValueTooLong"
+	// ErrorTypeTooMany is used to report "too many". This is used to
+	// report that a given list has too many items. This is similar to FieldValueTooLong,
+	// but the error indicates quantity instead of length.
+	ErrorTypeTooMany ErrorType = "FieldValueTooMany"
+	// ErrorTypeInternal is used to report other errors that are not related
+	// to user input.  See InternalError().
+	ErrorTypeInternal ErrorType = "InternalError"
+)
+
+// String converts an ErrorType into its corresponding canonical error message.
+func (t ErrorType) String() string {
+	switch t {
+	case ErrorTypeNotFound:
+		return "Not found"
+	case ErrorTypeRequired:
+		return "Required value"
+	case ErrorTypeDuplicate:
+		return "Duplicate value"
+	case ErrorTypeInvalid:
+		return "Invalid value"
+	case ErrorTypeNotSupported:
+		return "Unsupported value"
+	case ErrorTypeForbidden:
+		return "Forbidden"
+	case ErrorTypeTooLong:
+		return "Too long"
+	case ErrorTypeTooMany:
+		return "Too many"
+	case ErrorTypeInternal:
+		return "Internal error"
+	default:
+		panic(fmt.Sprintf("unrecognized validation error: %q", string(t)))
+	}
+}
+
+// NotFound returns a *Error indicating "value not found".  This is
+// used to report failure to find a requested value (e.g. looking up an ID).
+func NotFound(field *Path, value interface{}) *Error {
+	return &Error{ErrorTypeNotFound, field.String(), value, ""}
+}
+
+// Required returns a *Error indicating "value required".  This is used
+// to report required values that are not provided (e.g. empty strings, null
+// values, or empty arrays).
+func Required(field *Path, detail string) *Error {
+	return &Error{ErrorTypeRequired, field.String(), "", detail}
+}
+
+// Duplicate returns a *Error indicating "duplicate value".  This is
+// used to report collisions of values that must be unique (e.g. names or IDs).
+func Duplicate(field *Path, value interface{}) *Error {
+	return &Error{ErrorTypeDuplicate, field.String(), value, ""}
+}
+
+// Invalid returns a *Error indicating "invalid value".  This is used
+// to report malformed values (e.g. failed regex match, too long, out of bounds).
+func Invalid(field *Path, value interface{}, detail string) *Error {
+	return &Error{ErrorTypeInvalid, field.String(), value, detail}
+}
+
+// NotSupported returns a *Error indicating "unsupported value".
+// This is used to report unknown values for enumerated fields (e.g. a list of
+// valid values).
+func NotSupported(field *Path, value interface{}, validValues []string) *Error {
+	detail := ""
+	if len(validValues) > 0 {
+		quotedValues := make([]string, len(validValues))
+		for i, v := range validValues {
+			quotedValues[i] = strconv.Quote(v)
+		}
+		detail = "supported values: " + strings.Join(quotedValues, ", ")
+	}
+	return &Error{ErrorTypeNotSupported, field.String(), value, detail}
+}
+
+// Forbidden returns a *Error indicating "forbidden".  This is used to
+// report valid (as per formatting rules) values which would be accepted under
+// some conditions, but which are not permitted by current conditions (e.g.
+// security policy).
+func Forbidden(field *Path, detail string) *Error {
+	return &Error{ErrorTypeForbidden, field.String(), "", detail}
+}
+
+// TooLong returns a *Error indicating "too long".  This is used to
+// report that the given value is too long.  This is similar to
+// Invalid, but the returned error will not include the too-long
+// value.
+func TooLong(field *Path, value interface{}, maxLength int) *Error {
+	return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
+}
+
+// TooMany returns a *Error indicating "too many". This is used to
+// report that a given list has too many items. This is similar to TooLong,
+// but the returned error indicates quantity instead of length.
+func TooMany(field *Path, actualQuantity, maxQuantity int) *Error {
+	return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)}
+}
+
+// InternalError returns a *Error indicating "internal error".  This is used
+// to signal that an error was found that was not directly related to user
+// input.  The err argument must be non-nil.
+func InternalError(field *Path, err error) *Error {
+	return &Error{ErrorTypeInternal, field.String(), nil, err.Error()}
+}
+
+// ErrorList holds a set of Errors.  It is plausible that we might one day have
+// non-field errors in this same umbrella package, but for now we don't, so
+// we can keep it simple and leave ErrorList here.
+type ErrorList []*Error
+
+// NewErrorTypeMatcher returns an errors.Matcher that returns true
+// if the provided error is a Error and has the provided ErrorType.
+func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher {
+	return func(err error) bool {
+		if e, ok := err.(*Error); ok {
+			return e.Type == t
+		}
+		return false
+	}
+}
+
+// ToAggregate converts the ErrorList into an errors.Aggregate.
+func (list ErrorList) ToAggregate() utilerrors.Aggregate {
+	errs := make([]error, 0, len(list))
+	errorMsgs := sets.NewString()
+	for _, err := range list {
+		msg := fmt.Sprintf("%v", err)
+		if errorMsgs.Has(msg) {
+			continue
+		}
+		errorMsgs.Insert(msg)
+		errs = append(errs, err)
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+func fromAggregate(agg utilerrors.Aggregate) ErrorList {
+	errs := agg.Errors()
+	list := make(ErrorList, len(errs))
+	for i := range errs {
+		list[i] = errs[i].(*Error)
+	}
+	return list
+}
+
+// Filter removes items from the ErrorList that match the provided fns.
+func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList {
+	err := utilerrors.FilterOut(list.ToAggregate(), fns...)
+	if err == nil {
+		return nil
+	}
+	// FilterOut takes an Aggregate and returns an Aggregate
+	return fromAggregate(err.(utilerrors.Aggregate))
+}
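+
+// A minimal usage sketch, from a caller's point of view: build errors against
+// a field path and collapse them into a single aggregate. The "spec.replicas"
+// path and the value below are examples only.
+//
+//	path := field.NewPath("spec").Child("replicas")
+//	errs := field.ErrorList{
+//		field.Invalid(path, -1, "must be greater than or equal to 0"),
+//	}
+//	if len(errs) > 0 {
+//		return errs.ToAggregate()
+//	}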
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
new file mode 100644
index 0000000..2efc8ee
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// Path represents the path from some root to a particular field.
+type Path struct {
+	name   string // the name of this field or "" if this is an index
+	index  string // if name == "", this is a subscript (index or map key) of the previous element
+	parent *Path  // nil if this is the root element
+}
+
+// NewPath creates a root Path object.
+func NewPath(name string, moreNames ...string) *Path {
+	r := &Path{name: name, parent: nil}
+	for _, anotherName := range moreNames {
+		r = &Path{name: anotherName, parent: r}
+	}
+	return r
+}
+
+// Root returns the root element of this Path.
+func (p *Path) Root() *Path {
+	for ; p.parent != nil; p = p.parent {
+		// Do nothing.
+	}
+	return p
+}
+
+// Child creates a new Path that is a child of the method receiver.
+func (p *Path) Child(name string, moreNames ...string) *Path {
+	r := NewPath(name, moreNames...)
+	r.Root().parent = p
+	return r
+}
+
+// Index indicates that the previous Path is to be subscripted by an int.
+// This sets the same underlying value as Key.
+func (p *Path) Index(index int) *Path {
+	return &Path{index: strconv.Itoa(index), parent: p}
+}
+
+// Key indicates that the previous Path is to be subscripted by a string.
+// This sets the same underlying value as Index.
+func (p *Path) Key(key string) *Path {
+	return &Path{index: key, parent: p}
+}
+
+// String produces a string representation of the Path.
+func (p *Path) String() string {
+	// make a slice to iterate
+	elems := []*Path{}
+	for ; p != nil; p = p.parent {
+		elems = append(elems, p)
+	}
+
+	// iterate, but it has to be backwards
+	buf := bytes.NewBuffer(nil)
+	for i := range elems {
+		p := elems[len(elems)-1-i]
+		if p.parent != nil && len(p.name) > 0 {
+			// Add a separator unless this is the root or a subscript.
+			buf.WriteString(".")
+		}
+		if len(p.name) > 0 {
+			buf.WriteString(p.name)
+		} else {
+			fmt.Fprintf(buf, "[%s]", p.index)
+		}
+	}
+	return buf.String()
+}
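+
+// A short sketch of how callers typically build and render paths; Index and
+// Key produce bracketed subscripts. The field names are examples only.
+//
+//	p := field.NewPath("spec").Child("containers").Index(0).Child("image")
+//	p.String() // "spec.containers[0].image"
+//
+//	q := field.NewPath("metadata").Child("labels").Key("app")
+//	q.String() // "metadata.labels[app]"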
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
new file mode 100644
index 0000000..4752b29
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -0,0 +1,503 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"math"
+	"net"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const qnameCharFmt string = "[A-Za-z0-9]"
+const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
+const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
+const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const qualifiedNameMaxLength int = 63
+
+var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name".  This is a format used in various places throughout the
+// system.  If the value is not valid, a list of error strings is returned.
+// Otherwise an empty list (or nil) is returned.
+func IsQualifiedName(value string) []string {
+	var errs []string
+	parts := strings.Split(value, "/")
+	var name string
+	switch len(parts) {
+	case 1:
+		name = parts[0]
+	case 2:
+		var prefix string
+		prefix, name = parts[0], parts[1]
+		if len(prefix) == 0 {
+			errs = append(errs, "prefix part "+EmptyError())
+		} else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
+			errs = append(errs, prefixEach(msgs, "prefix part ")...)
+		}
+	default:
+		return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+
+			" with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
+	}
+
+	if len(name) == 0 {
+		errs = append(errs, "name part "+EmptyError())
+	} else if len(name) > qualifiedNameMaxLength {
+		errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength))
+	}
+	if !qualifiedNameRegexp.MatchString(name) {
+		errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"))
+	}
+	return errs
+}
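+
+// A few illustrative inputs and their outcomes (the values are examples only):
+//
+//	validation.IsQualifiedName("app")                // nil (valid)
+//	validation.IsQualifiedName("example.com/MyName") // nil (valid, DNS subdomain prefix)
+//	validation.IsQualifiedName("-bad")               // messages: must start and end with an alphanumeric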
+
+// IsFullyQualifiedName checks if the name is fully qualified. This is similar
+// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of
+// 2 and does not accept a trailing . as valid.
+// TODO: This function is deprecated and preserved until all callers migrate to
+// IsFullyQualifiedDomainName; please don't add new callers.
+func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
+	var allErrors field.ErrorList
+	if len(name) == 0 {
+		return append(allErrors, field.Required(fldPath, ""))
+	}
+	if errs := IsDNS1123Subdomain(name); len(errs) > 0 {
+		return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ",")))
+	}
+	if len(strings.Split(name, ".")) < 3 {
+		return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots"))
+	}
+	return allErrors
+}
+
+// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This
+// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments
+// instead of 3 and accepts a trailing . as valid.
+func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList {
+	var allErrors field.ErrorList
+	if len(name) == 0 {
+		return append(allErrors, field.Required(fldPath, ""))
+	}
+	if strings.HasSuffix(name, ".") {
+		name = name[:len(name)-1]
+	}
+	if errs := IsDNS1123Subdomain(name); len(errs) > 0 {
+		return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ",")))
+	}
+	if len(strings.Split(name, ".")) < 2 {
+		return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots"))
+	}
+	for _, label := range strings.Split(name, ".") {
+		if errs := IsDNS1123Label(label); len(errs) > 0 {
+			return append(allErrors, field.Invalid(fldPath, label, strings.Join(errs, ",")))
+		}
+	}
+	return allErrors
+}
+
+// Allowed characters in an HTTP path as defined by RFC 3986. An HTTP path may
+// contain:
+// * unreserved characters (alphanumeric, '-', '.', '_', '~')
+// * percent-encoded octets
+// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=")
+// * a colon character (":")
+const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+`
+
+var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$")
+
+// IsDomainPrefixedPath checks if the given string is a domain-prefixed path
+// (e.g. acme.io/foo). All characters before the first "/" must be a valid
+// subdomain as defined by RFC 1123. All characters trailing the first "/" must
+// be valid HTTP Path characters as defined by RFC 3986.
+func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList {
+	var allErrs field.ErrorList
+	if len(dpPath) == 0 {
+		return append(allErrs, field.Required(fldPath, ""))
+	}
+
+	segments := strings.SplitN(dpPath, "/", 2)
+	if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 {
+		return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")"))
+	}
+
+	host := segments[0]
+	for _, err := range IsDNS1123Subdomain(host) {
+		allErrs = append(allErrs, field.Invalid(fldPath, host, err))
+	}
+
+	path := segments[1]
+	if !httpPathRegexp.MatchString(path) {
+		return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt)))
+	}
+
+	return allErrs
+}
+
+const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
+const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+
+// LabelValueMaxLength is a label's max length
+const LabelValueMaxLength int = 63
+
+var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+
+// IsValidLabelValue tests whether the value passed is a valid label value.  If
+// the value is not valid, a list of error strings is returned.  Otherwise an
+// empty list (or nil) is returned.
+func IsValidLabelValue(value string) []string {
+	var errs []string
+	if len(value) > LabelValueMaxLength {
+		errs = append(errs, MaxLenError(LabelValueMaxLength))
+	}
+	if !labelValueRegexp.MatchString(value) {
+		errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
+	}
+	return errs
+}
+
+const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+
+// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
+const DNS1123LabelMaxLength int = 63
+
+var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
+
+// IsDNS1123Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1123).
+func IsDNS1123Label(value string) []string {
+	var errs []string
+	if len(value) > DNS1123LabelMaxLength {
+		errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
+	}
+	if !dns1123LabelRegexp.MatchString(value) {
+		errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
+	}
+	return errs
+}
+
+const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
+const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+
+// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
+const DNS1123SubdomainMaxLength int = 253
+
+var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+
+// IsDNS1123Subdomain tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123).
+func IsDNS1123Subdomain(value string) []string {
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !dns1123SubdomainRegexp.MatchString(value) {
+		errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com"))
+	}
+	return errs
+}
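+
+// Illustrative inputs for the DNS-1123 helpers (the values are examples only):
+//
+//	validation.IsDNS1123Label("my-name")         // nil (valid)
+//	validation.IsDNS1123Label("My_Name")         // messages: upper case and '_' are rejected
+//	validation.IsDNS1123Subdomain("example.com") // nil (valid)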
+
+const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
+const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
+
+// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035)
+const DNS1035LabelMaxLength int = 63
+
+var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$")
+
+// IsDNS1035Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1035).
+func IsDNS1035Label(value string) []string {
+	var errs []string
+	if len(value) > DNS1035LabelMaxLength {
+		errs = append(errs, MaxLenError(DNS1035LabelMaxLength))
+	}
+	if !dns1035LabelRegexp.MatchString(value) {
+		errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123"))
+	}
+	return errs
+}
+
+// wildcard definition - RFC 1034 section 4.3.3.
+// examples:
+// - valid: *.bar.com, *.foo.bar.com
+// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, *
+const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt
+const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character"
+
+// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a
+// wildcard subdomain in DNS (RFC 1034 section 4.3.3).
+func IsWildcardDNS1123Subdomain(value string) []string {
+	wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$")
+
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !wildcardDNS1123SubdomainRegexp.MatchString(value) {
+		errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com"))
+	}
+	return errs
+}
+
+const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
+const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
+
+var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
+
+// IsCIdentifier tests for a string that conforms to the definition of an identifier
+// in C. This checks the format, but not the length.
+func IsCIdentifier(value string) []string {
+	if !cIdentifierRegexp.MatchString(value) {
+		return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
+	}
+	return nil
+}
+
+// IsValidPortNum tests that the argument is a valid, non-zero port number.
+func IsValidPortNum(port int) []string {
+	if 1 <= port && port <= 65535 {
+		return nil
+	}
+	return []string{InclusiveRangeError(1, 65535)}
+}
+
+// IsInRange tests that the argument is in an inclusive range.
+func IsInRange(value int, min int, max int) []string {
+	if value >= min && value <= max {
+		return nil
+	}
+	return []string{InclusiveRangeError(min, max)}
+}
+
+// In libcontainer, the UID/GID limits are currently 0 ~ 1<<31 - 1.
+// TODO: once we have a type for UID/GID we should make these that type.
+const (
+	minUserID  = 0
+	maxUserID  = math.MaxInt32
+	minGroupID = 0
+	maxGroupID = math.MaxInt32
+)
+
+// IsValidGroupID tests that the argument is a valid Unix GID.
+func IsValidGroupID(gid int64) []string {
+	if minGroupID <= gid && gid <= maxGroupID {
+		return nil
+	}
+	return []string{InclusiveRangeError(minGroupID, maxGroupID)}
+}
+
+// IsValidUserID tests that the argument is a valid Unix UID.
+func IsValidUserID(uid int64) []string {
+	if minUserID <= uid && uid <= maxUserID {
+		return nil
+	}
+	return []string{InclusiveRangeError(minUserID, maxUserID)}
+}
+
+var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$")
+var portNameOneLetterRegexp = regexp.MustCompile("[a-z]")
+
+// IsValidPortName checks that the argument has valid syntax. It must be
+// non-empty and no more than 15 characters long. It may contain only [-a-z0-9]
+// and must contain at least one letter [a-z]. It must not start or end with a
+// hyphen, nor contain adjacent hyphens.
+//
+// Note: We only allow lower-case characters, even though RFC 6335 is case
+// insensitive.
+func IsValidPortName(port string) []string {
+	var errs []string
+	if len(port) > 15 {
+		errs = append(errs, MaxLenError(15))
+	}
+	if !portNameCharsetRegex.MatchString(port) {
+		errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)")
+	}
+	if !portNameOneLetterRegexp.MatchString(port) {
+		errs = append(errs, "must contain at least one letter or number (a-z, 0-9)")
+	}
+	if strings.Contains(port, "--") {
+		errs = append(errs, "must not contain consecutive hyphens")
+	}
+	if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') {
+		errs = append(errs, "must not begin or end with a hyphen")
+	}
+	return errs
+}
+
+// IsValidIP tests that the argument is a valid IP address.
+func IsValidIP(value string) []string {
+	if net.ParseIP(value) == nil {
+		return []string{"must be a valid IP address, (e.g. 10.9.8.7)"}
+	}
+	return nil
+}
+
+// IsValidIPv4Address tests that the argument is a valid IPv4 address.
+func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList {
+	var allErrors field.ErrorList
+	ip := net.ParseIP(value)
+	if ip == nil || ip.To4() == nil {
+		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address"))
+	}
+	return allErrors
+}
+
+// IsValidIPv6Address tests that the argument is a valid IPv6 address.
+func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList {
+	var allErrors field.ErrorList
+	ip := net.ParseIP(value)
+	if ip == nil || ip.To4() != nil {
+		allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address"))
+	}
+	return allErrors
+}
+
+const percentFmt string = "[0-9]+%"
+const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'"
+
+var percentRegexp = regexp.MustCompile("^" + percentFmt + "$")
+
+// IsValidPercent checks that string is in the form of a percentage
+func IsValidPercent(percent string) []string {
+	if !percentRegexp.MatchString(percent) {
+		return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")}
+	}
+	return nil
+}
+
+const httpHeaderNameFmt string = "[-A-Za-z0-9]+"
+const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'"
+
+var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$")
+
+// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's
+// definition of a valid header field name (a stricter subset than RFC7230).
+func IsHTTPHeaderName(value string) []string {
+	if !httpHeaderNameRegexp.MatchString(value) {
+		return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")}
+	}
+	return nil
+}
+
+const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*"
+const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit"
+
+var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$")
+
+// IsEnvVarName tests if a string is a valid environment variable name.
+func IsEnvVarName(value string) []string {
+	var errs []string
+	if !envVarNameRegexp.MatchString(value) {
+		errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1"))
+	}
+
+	errs = append(errs, hasChDirPrefix(value)...)
+	return errs
+}
+
+const configMapKeyFmt = `[-._a-zA-Z0-9]+`
+const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'"
+
+var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$")
+
+// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret
+func IsConfigMapKey(value string) []string {
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !configMapKeyRegexp.MatchString(value) {
+		errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name"))
+	}
+	errs = append(errs, hasChDirPrefix(value)...)
+	return errs
+}
+
+// MaxLenError returns a string explanation of a "string too long" validation
+// failure.
+func MaxLenError(length int) string {
+	return fmt.Sprintf("must be no more than %d characters", length)
+}
+
+// RegexError returns a string explanation of a regex validation failure.
+func RegexError(msg string, fmt string, examples ...string) string {
+	if len(examples) == 0 {
+		return msg + " (regex used for validation is '" + fmt + "')"
+	}
+	msg += " (e.g. "
+	for i := range examples {
+		if i > 0 {
+			msg += " or "
+		}
+		msg += "'" + examples[i] + "', "
+	}
+	msg += "regex used for validation is '" + fmt + "')"
+	return msg
+}
+
+// EmptyError returns a string explanation of a "must not be empty" validation
+// failure.
+func EmptyError() string {
+	return "must be non-empty"
+}
+
+func prefixEach(msgs []string, prefix string) []string {
+	for i := range msgs {
+		msgs[i] = prefix + msgs[i]
+	}
+	return msgs
+}
+
+// InclusiveRangeError returns a string explanation of a numeric "must be
+// between" validation failure.
+func InclusiveRangeError(lo, hi int) string {
+	return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi)
+}
+
+func hasChDirPrefix(value string) []string {
+	var errs []string
+	switch {
+	case value == ".":
+		errs = append(errs, `must not be '.'`)
+	case value == "..":
+		errs = append(errs, `must not be '..'`)
+	case strings.HasPrefix(value, ".."):
+		errs = append(errs, `must not start with '..'`)
+	}
+	return errs
+}
+
+// IsValidSocketAddr checks that the string represents a valid socket address
+// as defined in RFC 789 (e.g. 0.0.0.0:10254 or [::]:10254).
+func IsValidSocketAddr(value string) []string {
+	var errs []string
+	ip, port, err := net.SplitHostPort(value)
+	if err != nil {
+		errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)")
+		return errs
+	}
+	portInt, _ := strconv.Atoi(port)
+	errs = append(errs, IsValidPortNum(portInt)...)
+	errs = append(errs, IsValidIP(ip)...)
+	return errs
+}
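+
+// Illustrative inputs (the addresses are examples only):
+//
+//	validation.IsValidSocketAddr("0.0.0.0:10254") // nil (valid)
+//	validation.IsValidSocketAddr("[::]:10254")    // nil (valid)
+//	validation.IsValidSocketAddr("localhost")     // message: not a host:port pair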
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
new file mode 100644
index 0000000..3f0c968
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait provides tools for polling or listening for changes
+// to a condition.
+package wait // import "k8s.io/apimachinery/pkg/util/wait"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
new file mode 100644
index 0000000..d759d91
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -0,0 +1,606 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"errors"
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// For any test of the style:
+//   ...
+//   <- time.After(timeout):
+//      t.Errorf("Timed out")
+// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
+// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
+// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
+var ForeverTestTimeout = time.Second * 30
+
+// NeverStop may be passed to Until to make it never stop.
+var NeverStop <-chan struct{} = make(chan struct{})
+
+// Group allows starting a group of goroutines and waiting for their completion.
+type Group struct {
+	wg sync.WaitGroup
+}
+
+func (g *Group) Wait() {
+	g.wg.Wait()
+}
+
+// StartWithChannel starts f in a new goroutine in the group.
+// stopCh is passed to f as an argument. f should stop when stopCh is closed.
+func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) {
+	g.Start(func() {
+		f(stopCh)
+	})
+}
+
+// StartWithContext starts f in a new goroutine in the group.
+// ctx is passed to f as an argument. f should stop when ctx.Done() is closed.
+func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) {
+	g.Start(func() {
+		f(ctx)
+	})
+}
+
+// Start starts f in a new goroutine in the group.
+func (g *Group) Start(f func()) {
+	g.wg.Add(1)
+	go func() {
+		defer g.wg.Done()
+		f()
+	}()
+}
+
+// Forever calls f every period, forever.
+//
+// Forever is syntactic sugar on top of Until.
+func Forever(f func(), period time.Duration) {
+	Until(f, period, NeverStop)
+}
+
+// Until loops until stop channel is closed, running f every period.
+//
+// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
+// with sliding = true (which means the timer for period starts after the f
+// completes).
+func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
+	JitterUntil(f, period, 0.0, true, stopCh)
+}
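+
+// A minimal usage sketch: run a worker function every second until the stop
+// channel is closed. The worker body is a placeholder.
+//
+//	stopCh := make(chan struct{})
+//	go wait.Until(func() {
+//		// one unit of work, repeated every period until stopCh is closed
+//	}, time.Second, stopCh)
+//	// ... later
+//	close(stopCh)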
+
+// UntilWithContext loops until context is done, running f every period.
+//
+// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor and with sliding = true (which means the timer
+// for period starts after the f completes).
+func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+	JitterUntilWithContext(ctx, f, period, 0.0, true)
+}
+
+// NonSlidingUntil loops until stop channel is closed, running f every
+// period.
+//
+// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
+// factor, with sliding = false (meaning the timer for period starts at the same
+// time as the function starts).
+func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
+	JitterUntil(f, period, 0.0, false, stopCh)
+}
+
+// NonSlidingUntilWithContext loops until context is done, running f every
+// period.
+//
+// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor, with sliding = false (meaning the timer for period
+// starts at the same time as the function starts).
+func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+	JitterUntilWithContext(ctx, f, period, 0.0, false)
+}
+
+// JitterUntil loops until stop channel is closed, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Close stopCh to stop. f may not be invoked if stop channel is already
+// closed. Pass NeverStop if you don't want it to stop.
+func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
+	BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
+}
+
+// BackoffUntil loops until the stop channel is closed, running f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+	var t clock.Timer
+	for {
+		select {
+		case <-stopCh:
+			return
+		default:
+		}
+
+		if !sliding {
+			t = backoff.Backoff()
+		}
+
+		func() {
+			defer runtime.HandleCrash()
+			f()
+		}()
+
+		if sliding {
+			t = backoff.Backoff()
+		}
+
+		// NOTE: b/c there is no priority selection in golang
+		// it is possible for this to race, meaning we could
+		// trigger t.C and stopCh, and t.C select falls through.
+		// In order to mitigate we re-check stopCh at the beginning
+		// of every loop to prevent extra executions of f().
+		select {
+		case <-stopCh:
+			return
+		case <-t.C():
+		}
+	}
+}
+
+// JitterUntilWithContext loops until context is done, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Cancel context to stop. f may not be invoked if context is already expired.
+func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
+	JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
+}
+
+// Jitter returns a time.Duration between duration and duration + maxFactor *
+// duration.
+//
+// This allows clients to avoid converging on periodic behavior. If maxFactor
+// is 0.0, a suggested default value will be chosen.
+func Jitter(duration time.Duration, maxFactor float64) time.Duration {
+	if maxFactor <= 0.0 {
+		maxFactor = 1.0
+	}
+	wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+	return wait
+}
+
+// ErrWaitTimeout is returned when the condition exited without success.
+var ErrWaitTimeout = errors.New("timed out waiting for the condition")
+
+// ConditionFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+type ConditionFunc func() (done bool, err error)
+
+// runConditionWithCrashProtection runs a ConditionFunc with crash protection
+func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+	defer runtime.HandleCrash()
+	return condition()
+}
+
+// Backoff holds parameters applied to a Backoff function.
+type Backoff struct {
+	// The initial duration.
+	Duration time.Duration
+	// Duration is multiplied by factor each iteration, if factor is not zero
+	// and the limits imposed by Steps and Cap have not been reached.
+	// Should not be negative.
+	// The jitter does not contribute to the updates to the duration parameter.
+	Factor float64
+	// The sleep at each iteration is the duration plus an additional
+	// amount chosen uniformly at random from the interval between
+	// zero and `jitter*duration`.
+	Jitter float64
+	// The remaining number of iterations in which the duration
+	// parameter may change (but progress can be stopped earlier by
+	// hitting the cap). If not positive, the duration is not
+	// changed. Used for exponential backoff in combination with
+	// Factor and Cap.
+	Steps int
+	// A limit on revised values of the duration parameter. If a
+	// multiplication by the factor parameter would make the duration
+	// exceed the cap then the duration is set to the cap and the
+	// steps parameter is set to zero.
+	Cap time.Duration
+}
+
+// Step (1) returns an amount of time to sleep determined by the
+// original Duration and Jitter and (2) mutates the provided Backoff
+// to update its Steps and Duration.
+func (b *Backoff) Step() time.Duration {
+	if b.Steps < 1 {
+		if b.Jitter > 0 {
+			return Jitter(b.Duration, b.Jitter)
+		}
+		return b.Duration
+	}
+	b.Steps--
+
+	duration := b.Duration
+
+	// calculate the next step
+	if b.Factor != 0 {
+		b.Duration = time.Duration(float64(b.Duration) * b.Factor)
+		if b.Cap > 0 && b.Duration > b.Cap {
+			b.Duration = b.Cap
+			b.Steps = 0
+		}
+	}
+
+	if b.Jitter > 0 {
+		duration = Jitter(duration, b.Jitter)
+	}
+	return duration
+}
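+
+// A small worked example: with Duration=1s, Factor=2, Steps=3 and no Jitter,
+// successive Step() calls return 1s, 2s and 4s; once Steps is exhausted every
+// further call returns the final Duration (8s) unchanged.
+//
+//	b := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 3}
+//	b.Step() // 1s
+//	b.Step() // 2s
+//	b.Step() // 4s
+//	b.Step() // 8s (Steps exhausted; Duration no longer grows)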
+
+// contextForChannel derives a child context from a parent channel.
+//
+// The derived context's Done channel is closed when the returned cancel function
+// is called or when the parent channel is closed, whichever happens first.
+//
+// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
+func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	go func() {
+		select {
+		case <-parentCh:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+	return ctx, cancel
+}
+
+// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
+// an interface to return a timer for backoff, and the caller shall back off until Timer.C() drains. If Backoff() is
+// called again before the timer from the previous call fires, the previous timer will NOT be drained, resulting in
+// undefined behavior.
+// The BackoffManager is supposed to be called in a single-threaded environment.
+type BackoffManager interface {
+	Backoff() clock.Timer
+}
+
+type exponentialBackoffManagerImpl struct {
+	backoff              *Backoff
+	backoffTimer         clock.Timer
+	lastBackoffStart     time.Time
+	initialBackoff       time.Duration
+	backoffResetDuration time.Duration
+	clock                clock.Clock
+}
+
+// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
+// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset.
+// This backoff manager is used to reduce load during upstream unhealthiness.
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
+	return &exponentialBackoffManagerImpl{
+		backoff: &Backoff{
+			Duration: initBackoff,
+			Factor:   backoffFactor,
+			Jitter:   jitter,
+
+			// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
+			// what we ideally need here, we set it to max int and assume we will never use up the steps
+			Steps: math.MaxInt32,
+			Cap:   maxBackoff,
+		},
+		backoffTimer:         nil,
+		initialBackoff:       initBackoff,
+		lastBackoffStart:     c.Now(),
+		backoffResetDuration: resetDuration,
+		clock:                c,
+	}
+}
+
+func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
+	if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
+		b.backoff.Steps = math.MaxInt32
+		b.backoff.Duration = b.initialBackoff
+	}
+	b.lastBackoffStart = b.clock.Now()
+	return b.backoff.Step()
+}
+
+// Backoff implements BackoffManager.Backoff; it returns a timer so the caller can block on it for exponential backoff.
+// The returned timer must be drained before calling Backoff() again.
+func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
+	if b.backoffTimer == nil {
+		b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
+	} else {
+		b.backoffTimer.Reset(b.getNextBackoff())
+	}
+	return b.backoffTimer
+}
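+
+// A usage sketch: pair BackoffUntil with an exponential backoff manager so a
+// control loop slows down while a dependency is unhealthy. syncOnce and stopCh
+// are hypothetical placeholders.
+//
+//	mgr := wait.NewExponentialBackoffManager(
+//		800*time.Millisecond, 30*time.Second, 2*time.Minute, // initial, max, reset
+//		2.0, 1.0, &clock.RealClock{})
+//	wait.BackoffUntil(syncOnce, mgr, true, stopCh)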
+
+type jitteredBackoffManagerImpl struct {
+	clock        clock.Clock
+	duration     time.Duration
+	jitter       float64
+	backoffTimer clock.Timer
+}
+
+// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the given jitter. If
+// the jitter is negative, the backoff is not jittered.
+func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
+	return &jitteredBackoffManagerImpl{
+		clock:        c,
+		duration:     duration,
+		jitter:       jitter,
+		backoffTimer: nil,
+	}
+}
+
+func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
+	jitteredPeriod := j.duration
+	if j.jitter > 0.0 {
+		jitteredPeriod = Jitter(j.duration, j.jitter)
+	}
+	return jitteredPeriod
+}
+
+// Backoff implements BackoffManager.Backoff; it returns a timer so the caller can block on it for jittered backoff.
+// The returned timer must be drained before calling Backoff() again.
+func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
+	backoff := j.getNextBackoff()
+	if j.backoffTimer == nil {
+		j.backoffTimer = j.clock.NewTimer(backoff)
+	} else {
+		j.backoffTimer.Reset(backoff)
+	}
+	return j.backoffTimer
+}
+
+// ExponentialBackoff repeats a condition check with exponential backoff.
+//
+// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
+// to determine the length of the sleep and adjust Duration and Steps.
+// Stops and returns as soon as:
+// 1. the condition check returns true or an error,
+// 2. `backoff.Steps` checks of the condition have been done, or
+// 3. a sleep truncated by the cap on duration has been completed.
+// In case (1) the returned error is what the condition function returned.
+// In all other cases, ErrWaitTimeout is returned.
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
+	for backoff.Steps > 0 {
+		if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
+			return err
+		}
+		if backoff.Steps == 1 {
+			break
+		}
+		time.Sleep(backoff.Step())
+	}
+	return ErrWaitTimeout
+}
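+
+// A usage sketch: retry an operation up to Steps times with exponential
+// backoff; tryOnce is a hypothetical helper reporting whether the attempt
+// succeeded.
+//
+//	backoff := wait.Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Steps: 5}
+//	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
+//		return tryOnce(), nil
+//	})
+//	// err is nil on success, or wait.ErrWaitTimeout after all attempts fail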
+
+// Poll tries a condition func until it returns true, an error, or the timeout
+// is reached.
+//
+// Poll always waits the interval before the run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to Poll something forever, see PollInfinite.
+func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
+	return pollInternal(poller(interval, timeout), condition)
+}
+
+func pollInternal(wait WaitFunc, condition ConditionFunc) error {
+	done := make(chan struct{})
+	defer close(done)
+	return WaitFor(wait, condition, done)
+}
+
+// PollImmediate tries a condition func until it returns true, an error, or the timeout
+// is reached.
+//
+// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
+// will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to immediately Poll something forever, see PollImmediateInfinite.
+func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
+	return pollImmediateInternal(poller(interval, timeout), condition)
+}
+
+func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
+	done, err := runConditionWithCrashProtection(condition)
+	if err != nil {
+		return err
+	}
+	if done {
+		return nil
+	}
+	return pollInternal(wait, condition)
+}
+
+// PollInfinite tries a condition func until it returns true or an error
+//
+// PollInfinite always waits the interval before the run of 'condition'.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollInfinite(interval time.Duration, condition ConditionFunc) error {
+	done := make(chan struct{})
+	defer close(done)
+	return PollUntil(interval, condition, done)
+}
+
+// PollImmediateInfinite tries a condition func until it returns true or an error
+//
+// PollImmediateInfinite runs the 'condition' before waiting for the interval.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
+	done, err := runConditionWithCrashProtection(condition)
+	if err != nil {
+		return err
+	}
+	if done {
+		return nil
+	}
+	return PollInfinite(interval, condition)
+}
+
+// PollUntil tries a condition func until it returns true, an error or stopCh is
+// closed.
+//
+// PollUntil always waits interval before the first run of 'condition'.
+// 'condition' will always be invoked at least once.
+func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+	ctx, cancel := contextForChannel(stopCh)
+	defer cancel()
+	return WaitFor(poller(interval, 0), condition, ctx.Done())
+}
+
+// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
+//
+// PollImmediateUntil runs the 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+	done, err := condition()
+	if err != nil {
+		return err
+	}
+	if done {
+		return nil
+	}
+	select {
+	case <-stopCh:
+		return ErrWaitTimeout
+	default:
+		return PollUntil(interval, condition, stopCh)
+	}
+}
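+
+// A usage sketch: poll a condition every two seconds for up to thirty seconds,
+// checking immediately on the first iteration. checkReady is a hypothetical
+// helper returning (bool, error).
+//
+//	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
+//		return checkReady()
+//	})
+//	// err is wait.ErrWaitTimeout if checkReady never reported true in time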
+
+// WaitFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+type WaitFunc func(done <-chan struct{}) <-chan struct{}
+
+// WaitFor continually checks 'fn' as driven by 'wait'.
+//
+// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
+// placed on the channel and once more when the channel is closed. If the channel is closed
+// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout.
+//
+// If 'fn' returns an error the loop ends and that error is returned. If
+// 'fn' returns true the loop ends and nil is returned.
+//
+// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever
+// returning true.
+//
+// When the done channel is closed, because the golang `select` statement is
+// "uniform pseudo-random", the `fn` might still run one or multiple time,
+// though eventually `WaitFor` will return.
+func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	c := wait(stopCh)
+	for {
+		select {
+		case _, open := <-c:
+			ok, err := runConditionWithCrashProtection(fn)
+			if err != nil {
+				return err
+			}
+			if ok {
+				return nil
+			}
+			if !open {
+				return ErrWaitTimeout
+			}
+		case <-done:
+			return ErrWaitTimeout
+		}
+	}
+}
+
+// poller returns a WaitFunc that will send to the channel every interval until
+// timeout has elapsed and then closes the channel.
+//
+// Over very short intervals you may receive no ticks before the channel is
+// closed. A timeout of 0 is interpreted as infinity, and in such a case
+// it would be the caller's responsibility to close the done channel.
+// Failure to do so would result in a leaked goroutine.
+//
+// Output ticks are not buffered. If the channel is not ready to receive an
+// item, the tick is skipped.
+func poller(interval, timeout time.Duration) WaitFunc {
+	return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
+		ch := make(chan struct{})
+
+		go func() {
+			defer close(ch)
+
+			tick := time.NewTicker(interval)
+			defer tick.Stop()
+
+			var after <-chan time.Time
+			if timeout != 0 {
+				// time.After is more convenient, but it
+				// potentially leaves timers around much longer
+				// than necessary if we exit early.
+				timer := time.NewTimer(timeout)
+				after = timer.C
+				defer timer.Stop()
+			}
+
+			for {
+				select {
+				case <-tick.C:
+					// If the consumer isn't ready for this signal drop it and
+					// check the other channels.
+					select {
+					case ch <- struct{}{}:
+					default:
+					}
+				case <-after:
+					return
+				case <-done:
+					return
+				}
+			}
+		}()
+
+		return ch
+	})
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
new file mode 100644
index 0000000..492171f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -0,0 +1,348 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"unicode"
+
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/yaml"
+)
+
+// ToJSON converts a single YAML document into a JSON document
+// or returns an error. If the document appears to be JSON the
+// YAML decoding path is not used (so that error messages are
+// JSON specific).
+func ToJSON(data []byte) ([]byte, error) {
+	if hasJSONPrefix(data) {
+		return data, nil
+	}
+	return yaml.YAMLToJSON(data)
+}
+
+// YAMLToJSONDecoder decodes YAML documents from an io.Reader by
+// separating individual documents. It first converts the YAML
+// body to JSON, then unmarshals the JSON.
+type YAMLToJSONDecoder struct {
+	reader Reader
+}
+
+// NewYAMLToJSONDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk, converting it to JSON via
+// yaml.YAMLToJSON, and then passing it to json.Decoder.
+func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder {
+	reader := bufio.NewReader(r)
+	return &YAMLToJSONDecoder{
+		reader: NewYAMLReader(reader),
+	}
+}
+
+// Decode reads a YAML document as JSON from the stream or returns
+// an error. The decoding rules match json.Unmarshal, not
+// yaml.Unmarshal.
+func (d *YAMLToJSONDecoder) Decode(into interface{}) error {
+	bytes, err := d.reader.Read()
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	if len(bytes) != 0 {
+		err := yaml.Unmarshal(bytes, into)
+		if err != nil {
+			return YAMLSyntaxError{err}
+		}
+	}
+	return err
+}
+
+// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if
+// the data is not sufficient.
+type YAMLDecoder struct {
+	r         io.ReadCloser
+	scanner   *bufio.Scanner
+	remaining []byte
+}
+
+// NewDocumentDecoder decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk. io.ErrShortBuffer will be
+// returned if the entire buffer could not be read to assist
+// the caller in framing the chunk.
+func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser {
+	scanner := bufio.NewScanner(r)
+	// initial buffer allocation: 4 KB
+	buf := make([]byte, 4*1024)
+	// maximum token buffer size: 5 MB
+	scanner.Buffer(buf, 5*1024*1024)
+	scanner.Split(splitYAMLDocument)
+	return &YAMLDecoder{
+		r:       r,
+		scanner: scanner,
+	}
+}
+
+// Read returns any bytes remaining from the previously scanned chunk, or
+// reads the next chunk from the stream.
+// TODO: switch to readline approach.
+func (d *YAMLDecoder) Read(data []byte) (n int, err error) {
+	left := len(d.remaining)
+	if left == 0 {
+		// return the next chunk from the stream
+		if !d.scanner.Scan() {
+			err := d.scanner.Err()
+			if err == nil {
+				err = io.EOF
+			}
+			return 0, err
+		}
+		out := d.scanner.Bytes()
+		d.remaining = out
+		left = len(out)
+	}
+
+	// fits within data
+	if left <= len(data) {
+		copy(data, d.remaining)
+		d.remaining = nil
+		return left, nil
+	}
+
+	// caller will need to reread
+	copy(data, d.remaining[:len(data)])
+	d.remaining = d.remaining[len(data):]
+	return len(data), io.ErrShortBuffer
+}
+
+func (d *YAMLDecoder) Close() error {
+	return d.r.Close()
+}
+
+const yamlSeparator = "\n---"
+const separator = "---"
+
+// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	sep := len([]byte(yamlSeparator))
+	if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
+		// We have a potential document terminator
+		i += sep
+		after := data[i:]
+		if len(after) == 0 {
+			// we can't read any more characters
+			if atEOF {
+				return len(data), data[:len(data)-sep], nil
+			}
+			return 0, nil, nil
+		}
+		if j := bytes.IndexByte(after, '\n'); j >= 0 {
+			return i + j + 1, data[0 : i-sep], nil
+		}
+		return 0, nil, nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), data, nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
+
+// decoder is a convenience interface for Decode.
+type decoder interface {
+	Decode(into interface{}) error
+}
+
+// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or
+// YAML documents by sniffing for a leading { character.
+type YAMLOrJSONDecoder struct {
+	r          io.Reader
+	bufferSize int
+
+	decoder decoder
+	rawData []byte
+}
+
+type JSONSyntaxError struct {
+	Line int
+	Err  error
+}
+
+func (e JSONSyntaxError) Error() string {
+	return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error())
+}
+
+type YAMLSyntaxError struct {
+	err error
+}
+
+func (e YAMLSyntaxError) Error() string {
+	return e.err.Error()
+}
+
+// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents
+// or JSON documents from the given reader as a stream. bufferSize determines
+// how far into the stream the decoder will look to figure out whether this
+// is a JSON stream (has whitespace followed by an open brace).
+func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder {
+	return &YAMLOrJSONDecoder{
+		r:          r,
+		bufferSize: bufferSize,
+	}
+}
+
+// Decode unmarshals the next object from the underlying stream into the
+// provided object, or returns an error.
+func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
+	if d.decoder == nil {
+		buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
+		if isJSON {
+			d.decoder = json.NewDecoder(buffer)
+			d.rawData = origData
+		} else {
+			d.decoder = NewYAMLToJSONDecoder(buffer)
+		}
+	}
+	err := d.decoder.Decode(into)
+	if jsonDecoder, ok := d.decoder.(*json.Decoder); ok {
+		if syntax, ok := err.(*json.SyntaxError); ok {
+			data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
+			if readErr != nil {
+				klog.V(4).Infof("reading stream failed: %v", readErr)
+			}
+			js := string(data)
+
+			// if contents from io.Reader are not complete,
+			// use the original raw data to prevent panic
+			if int64(len(js)) <= syntax.Offset {
+				js = string(d.rawData)
+			}
+
+			start := strings.LastIndex(js[:syntax.Offset], "\n") + 1
+			line := strings.Count(js[:start], "\n")
+			return JSONSyntaxError{
+				Line: line,
+				Err:  fmt.Errorf(syntax.Error()),
+			}
+		}
+	}
+	return err
+}
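+
+// A usage sketch: decode a stream that may be JSON or (multi-document) YAML
+// into generic maps; r is any io.Reader and 4096 is the sniffing buffer size.
+//
+//	dec := yaml.NewYAMLOrJSONDecoder(r, 4096)
+//	for {
+//		obj := map[string]interface{}{}
+//		if err := dec.Decode(&obj); err != nil {
+//			if err == io.EOF {
+//				break
+//			}
+//			return err
+//		}
+//		// use obj
+//	}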
+
+type Reader interface {
+	Read() ([]byte, error)
+}
+
+type YAMLReader struct {
+	reader Reader
+}
+
+func NewYAMLReader(r *bufio.Reader) *YAMLReader {
+	return &YAMLReader{
+		reader: &LineReader{reader: r},
+	}
+}
+
+// Read returns a full YAML document.
+func (r *YAMLReader) Read() ([]byte, error) {
+	var buffer bytes.Buffer
+	for {
+		line, err := r.reader.Read()
+		if err != nil && err != io.EOF {
+			return nil, err
+		}
+
+		sep := len([]byte(separator))
+		if i := bytes.Index(line, []byte(separator)); i == 0 {
+			// We have a potential document terminator
+			i += sep
+			after := line[i:]
+			if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
+				if buffer.Len() != 0 {
+					return buffer.Bytes(), nil
+				}
+				if err == io.EOF {
+					return nil, err
+				}
+			}
+		}
+		if err == io.EOF {
+			if buffer.Len() != 0 {
+				// If we're at EOF, we have a final, non-terminated line. Return it.
+				return buffer.Bytes(), nil
+			}
+			return nil, err
+		}
+		buffer.Write(line)
+	}
+}
+
+type LineReader struct {
+	reader *bufio.Reader
+}
+
+// Read returns a single line (terminated by '\n') from the underlying reader.
+// An error is returned iff there is an error with the underlying reader.
+func (r *LineReader) Read() ([]byte, error) {
+	var (
+		isPrefix bool  = true
+		err      error = nil
+		line     []byte
+		buffer   bytes.Buffer
+	)
+
+	for isPrefix && err == nil {
+		line, isPrefix, err = r.reader.ReadLine()
+		buffer.Write(line)
+	}
+	buffer.WriteByte('\n')
+	return buffer.Bytes(), err
+}
+
+// GuessJSONStream scans the provided reader up to size, looking
+// for an open brace indicating this is JSON. It will return the
+// bufio.Reader it creates for the consumer.
+func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) {
+	buffer := bufio.NewReaderSize(r, size)
+	b, _ := buffer.Peek(size)
+	return buffer, b, hasJSONPrefix(b)
+}
+
+var jsonPrefix = []byte("{")
+
+// hasJSONPrefix returns true if the provided buffer appears to start with
+// a JSON open brace.
+func hasJSONPrefix(buf []byte) bool {
+	return hasPrefix(buf, jsonPrefix)
+}
+
+// hasPrefix returns true if the first non-whitespace bytes in buf
+// are the given prefix.
+func hasPrefix(buf []byte, prefix []byte) bool {
+	trim := bytes.TrimLeftFunc(buf, unicode.IsSpace)
+	return bytes.HasPrefix(trim, prefix)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go
new file mode 100644
index 0000000..29574fd
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+// Package version supplies the type for version information collected at build time.
+package version // import "k8s.io/apimachinery/pkg/version"
diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
new file mode 100644
index 0000000..5e041d6
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/helpers.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type versionType int
+
+const (
+	// The bigger the version type number, the higher its priority.
+	versionTypeAlpha versionType = iota
+	versionTypeBeta
+	versionTypeGA
+)
+
+var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$")
+
+func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) {
+	var err error
+	submatches := kubeVersionRegex.FindStringSubmatch(v)
+	if len(submatches) != 4 {
+		return 0, 0, 0, false
+	}
+	switch submatches[2] {
+	case "alpha":
+		vType = versionTypeAlpha
+	case "beta":
+		vType = versionTypeBeta
+	case "":
+		vType = versionTypeGA
+	default:
+		return 0, 0, 0, false
+	}
+	if majorVersion, err = strconv.Atoi(submatches[1]); err != nil {
+		return 0, 0, 0, false
+	}
+	if vType != versionTypeGA {
+		if minorVersion, err = strconv.Atoi(submatches[3]); err != nil {
+			return 0, 0, 0, false
+		}
+	}
+	return majorVersion, vType, minorVersion, true
+}
+
+// CompareKubeAwareVersionStrings compares two kube-like version strings.
+// Kube-like version strings start with a v, followed by a major version, an optional "alpha" or "beta" string, and
+// a minor version (e.g. v1, v2beta1). Versions sort with GA before beta before alpha, and then by major and minor
+// version, e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
+func CompareKubeAwareVersionStrings(v1, v2 string) int {
+	if v1 == v2 {
+		return 0
+	}
+	v1major, v1type, v1minor, ok1 := parseKubeVersion(v1)
+	v2major, v2type, v2minor, ok2 := parseKubeVersion(v2)
+	switch {
+	case !ok1 && !ok2:
+		return strings.Compare(v2, v1)
+	case !ok1 && ok2:
+		return -1
+	case ok1 && !ok2:
+		return 1
+	}
+	if v1type != v2type {
+		return int(v1type) - int(v2type)
+	}
+	if v1major != v2major {
+		return v1major - v2major
+	}
+	return v1minor - v2minor
+}
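A minimal sketch of the ordering described above, sorting a few illustrative version strings with CompareKubeAwareVersionStrings (the sample slice is an assumption, not taken from the code):

package main

import (
	"fmt"
	"sort"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	vs := []string{"v1alpha1", "v2", "v1beta1", "v1", "v1beta2"}
	// Sort in "kube-aware" descending order: GA before beta before alpha,
	// then by major and minor version.
	sort.Slice(vs, func(i, j int) bool {
		return version.CompareKubeAwareVersionStrings(vs[i], vs[j]) > 0
	})
	fmt.Println(vs) // expected: [v2 v1 v1beta2 v1beta1 v1alpha1]
}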
diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go
new file mode 100644
index 0000000..72727b5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/version/types.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Info contains versioning information.
+// TODO: Add []string of api versions supported? It's still unclear
+// how we'll want to distribute that information.
+type Info struct {
+	Major        string `json:"major"`
+	Minor        string `json:"minor"`
+	GitVersion   string `json:"gitVersion"`
+	GitCommit    string `json:"gitCommit"`
+	GitTreeState string `json:"gitTreeState"`
+	BuildDate    string `json:"buildDate"`
+	GoVersion    string `json:"goVersion"`
+	Compiler     string `json:"compiler"`
+	Platform     string `json:"platform"`
+}
+
+// String returns info as a human-friendly version string.
+func (info Info) String() string {
+	return info.GitVersion
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
new file mode 100644
index 0000000..7e6bf3f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package watch contains a generic watchable interface, and a fake for
+// testing code that uses the watch interface.
+package watch // import "k8s.io/apimachinery/pkg/watch"
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go
new file mode 100644
index 0000000..22c9449
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"sync"
+)
+
+// FilterFunc should take an event, possibly modify it in some way, and return
+// the modified event. If the event should be ignored, then return keep=false.
+type FilterFunc func(in Event) (out Event, keep bool)
+
+// Filter passes all events through f before allowing them to pass on.
+// Putting a filter on a watch, as an unavoidable side-effect due to the way
+// go channels work, effectively causes the watch's event channel to have its
+// queue length increased by one.
+//
+// WARNING: filter has a fatal flaw, in that it can't properly update the
+// Type field (Added/Modified/Deleted) to reflect items beginning to pass the
+// filter when they previously didn't.
+//
+func Filter(w Interface, f FilterFunc) Interface {
+	fw := &filteredWatch{
+		incoming: w,
+		result:   make(chan Event),
+		f:        f,
+	}
+	go fw.loop()
+	return fw
+}
+
+type filteredWatch struct {
+	incoming Interface
+	result   chan Event
+	f        FilterFunc
+}
+
+// ResultChan returns a channel which will receive filtered events.
+func (fw *filteredWatch) ResultChan() <-chan Event {
+	return fw.result
+}
+
+// Stop stops the upstream watch, which will eventually stop this watch.
+func (fw *filteredWatch) Stop() {
+	fw.incoming.Stop()
+}
+
+// loop waits for new values, filters them, and resends them.
+func (fw *filteredWatch) loop() {
+	defer close(fw.result)
+	for event := range fw.incoming.ResultChan() {
+		filtered, keep := fw.f(event)
+		if keep {
+			fw.result <- filtered
+		}
+	}
+}
+
+// Recorder records all events that are sent from the watch until it is closed.
+type Recorder struct {
+	Interface
+
+	lock   sync.Mutex
+	events []Event
+}
+
+var _ Interface = &Recorder{}
+
+// NewRecorder wraps an Interface and records any changes sent across it.
+func NewRecorder(w Interface) *Recorder {
+	r := &Recorder{}
+	r.Interface = Filter(w, r.record)
+	return r
+}
+
+// record is a FilterFunc and tracks each received event.
+func (r *Recorder) record(in Event) (Event, bool) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	r.events = append(r.events, in)
+	return in, true
+}
+
+// Events returns a copy of the events sent across this recorder.
+func (r *Recorder) Events() []Event {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	copied := make([]Event, len(r.events))
+	copy(copied, r.events)
+	return copied
+}
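A minimal sketch of wiring Filter and Recorder together, assuming a FakeWatcher as the source and nil event objects purely for illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	fake := watch.NewFake()
	// Keep only Added events; Recorder additionally remembers whatever
	// passes the filter.
	filtered := watch.Filter(fake, func(in watch.Event) (watch.Event, bool) {
		return in, in.Type == watch.Added
	})
	rec := watch.NewRecorder(filtered)

	go func() {
		fake.Add(nil)    // passes the filter
		fake.Modify(nil) // dropped by the filter
		fake.Stop()      // closes the chain of result channels
	}()

	for ev := range rec.ResultChan() {
		fmt.Println("got:", ev.Type)
	}
	fmt.Println("recorded:", len(rec.Events()))
}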
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go
new file mode 100644
index 0000000..0ac8dc4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go
@@ -0,0 +1,260 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"sync"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
+// channel is full.
+type FullChannelBehavior int
+
+const (
+	WaitIfChannelFull FullChannelBehavior = iota
+	DropIfChannelFull
+)
+
+// Buffer the incoming queue a little bit even though it should rarely ever accumulate
+// anything, just in case a few events are received in such a short window that
+// Broadcaster can't move them onto the watchers' queues fast enough.
+const incomingQueueLength = 25
+
+// Broadcaster distributes event notifications among any number of watchers. Every event
+// is delivered to every watcher.
+type Broadcaster struct {
+	// TODO: see if this lock is needed now that new watchers go through
+	// the incoming channel.
+	lock sync.Mutex
+
+	watchers     map[int64]*broadcasterWatcher
+	nextWatcher  int64
+	distributing sync.WaitGroup
+
+	incoming chan Event
+
+	// How large to make watcher's channel.
+	watchQueueLength int
+	// If one of the watch channels is full, don't wait for it to become empty.
+	// Instead just deliver it to the watchers that do have space in their
+	// channels and move on to the next event.
+	// It's more fair to do this on a per-watcher basis than to do it on the
+	// "incoming" channel, which would allow one slow watcher to prevent all
+	// other watchers from getting new events.
+	fullChannelBehavior FullChannelBehavior
+}
+
+// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
+// It is guaranteed that events will be distributed in the order in which they occur,
+// but the order in which a single event is distributed among all of the watchers is unspecified.
+func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
+	m := &Broadcaster{
+		watchers:            map[int64]*broadcasterWatcher{},
+		incoming:            make(chan Event, incomingQueueLength),
+		watchQueueLength:    queueLength,
+		fullChannelBehavior: fullChannelBehavior,
+	}
+	m.distributing.Add(1)
+	go m.loop()
+	return m
+}
+
+const internalRunFunctionMarker = "internal-do-function"
+
+// a function type we can shoehorn into the queue.
+type functionFakeRuntimeObject func()
+
+func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind {
+	return schema.EmptyObjectKind
+}
+func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object {
+	if obj == nil {
+		return nil
+	}
+	// funcs are immutable. Hence, just return the original func.
+	return obj
+}
+
+// Execute f, blocking the incoming queue (and waiting for it to drain first).
+// The purpose of this terrible hack is so that watchers added after an event
+// won't ever see that event, and will always see any event after they are
+// added.
+func (b *Broadcaster) blockQueue(f func()) {
+	var wg sync.WaitGroup
+	wg.Add(1)
+	b.incoming <- Event{
+		Type: internalRunFunctionMarker,
+		Object: functionFakeRuntimeObject(func() {
+			defer wg.Done()
+			f()
+		}),
+	}
+	wg.Wait()
+}
+
+// Watch adds a new watcher to the list and returns an Interface for it.
+// Note: new watchers will only receive new events. They won't get an entire history
+// of previous events.
+func (m *Broadcaster) Watch() Interface {
+	var w *broadcasterWatcher
+	m.blockQueue(func() {
+		m.lock.Lock()
+		defer m.lock.Unlock()
+		id := m.nextWatcher
+		m.nextWatcher++
+		w = &broadcasterWatcher{
+			result:  make(chan Event, m.watchQueueLength),
+			stopped: make(chan struct{}),
+			id:      id,
+			m:       m,
+		}
+		m.watchers[id] = w
+	})
+	return w
+}
+
+// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends
+// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster.
+// The returned watch will have a queue length that is at least large enough to accommodate
+// all of the items in queuedEvents.
+func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface {
+	var w *broadcasterWatcher
+	m.blockQueue(func() {
+		m.lock.Lock()
+		defer m.lock.Unlock()
+		id := m.nextWatcher
+		m.nextWatcher++
+		length := m.watchQueueLength
+		if n := len(queuedEvents) + 1; n > length {
+			length = n
+		}
+		w = &broadcasterWatcher{
+			result:  make(chan Event, length),
+			stopped: make(chan struct{}),
+			id:      id,
+			m:       m,
+		}
+		m.watchers[id] = w
+		for _, e := range queuedEvents {
+			w.result <- e
+		}
+	})
+	return w
+}
+
+// stopWatching stops the given watcher and removes it from the list.
+func (m *Broadcaster) stopWatching(id int64) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	w, ok := m.watchers[id]
+	if !ok {
+		// No need to do anything, it's already been removed from the list.
+		return
+	}
+	delete(m.watchers, id)
+	close(w.result)
+}
+
+// closeAll disconnects all watchers (presumably in response to a Shutdown call).
+func (m *Broadcaster) closeAll() {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	for _, w := range m.watchers {
+		close(w.result)
+	}
+	// Delete everything from the map, since presence/absence in the map is used
+	// by stopWatching to avoid double-closing the channel.
+	m.watchers = map[int64]*broadcasterWatcher{}
+}
+
+// Action distributes the given event among all watchers.
+func (m *Broadcaster) Action(action EventType, obj runtime.Object) {
+	m.incoming <- Event{action, obj}
+}
+
+// Shutdown disconnects all watchers (but any queued events will still be distributed).
+// You must not call Action or Watch* after calling Shutdown. This call blocks
+// until all events have been distributed through the outbound channels. Note
+// that since they can be buffered, this means that the watchers might not
+// have received the data yet as it can remain sitting in the buffered
+// channel.
+func (m *Broadcaster) Shutdown() {
+	close(m.incoming)
+	m.distributing.Wait()
+}
+
+// loop receives from m.incoming and distributes to all watchers.
+func (m *Broadcaster) loop() {
+	// Deliberately not catching crashes here. Yes, bring down the process if there's a
+	// bug in watch.Broadcaster.
+	for event := range m.incoming {
+		if event.Type == internalRunFunctionMarker {
+			event.Object.(functionFakeRuntimeObject)()
+			continue
+		}
+		m.distribute(event)
+	}
+	m.closeAll()
+	m.distributing.Done()
+}
+
+// distribute sends event to all watchers. Blocking.
+func (m *Broadcaster) distribute(event Event) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	if m.fullChannelBehavior == DropIfChannelFull {
+		for _, w := range m.watchers {
+			select {
+			case w.result <- event:
+			case <-w.stopped:
+			default: // Don't block if the event can't be queued.
+			}
+		}
+	} else {
+		for _, w := range m.watchers {
+			select {
+			case w.result <- event:
+			case <-w.stopped:
+			}
+		}
+	}
+}
+
+// broadcasterWatcher handles a single watcher of a broadcaster
+type broadcasterWatcher struct {
+	result  chan Event
+	stopped chan struct{}
+	stop    sync.Once
+	id      int64
+	m       *Broadcaster
+}
+
+// ResultChan returns a channel to use for waiting on events.
+func (mw *broadcasterWatcher) ResultChan() <-chan Event {
+	return mw.result
+}
+
+// Stop stops watching and removes mw from its list.
+func (mw *broadcasterWatcher) Stop() {
+	mw.stop.Do(func() {
+		close(mw.stopped)
+		mw.m.stopWatching(mw.id)
+	})
+}
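A minimal sketch of the Broadcaster fan-out described above; the queue length of 10 and the nil event objects are illustrative assumptions:

package main

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	b := watch.NewBroadcaster(10, watch.WaitIfChannelFull)
	w1, w2 := b.Watch(), b.Watch()

	var wg sync.WaitGroup
	for _, w := range []watch.Interface{w1, w2} {
		wg.Add(1)
		go func(w watch.Interface) {
			defer wg.Done()
			for ev := range w.ResultChan() {
				fmt.Println("received:", ev.Type)
			}
		}(w)
	}

	b.Action(watch.Added, nil)    // delivered to both watchers
	b.Action(watch.Modified, nil) // delivered to both watchers
	b.Shutdown()                  // drains the queue, then closes every watcher
	wg.Wait()
}

WaitIfChannelFull trades throughput for lossless delivery; DropIfChannelFull instead skips watchers whose buffers are full, as the distribute method above shows.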
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
new file mode 100644
index 0000000..8271e9b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written.
+type Decoder interface {
+	// Decode should return the type of event, the decoded object, or an error.
+	// An error will cause StreamWatcher to call Close(). Decode should block until
+	// it has data or an error occurs.
+	Decode() (action EventType, object runtime.Object, err error)
+
+	// Close should close the underlying io.Reader, signalling to the source of
+	// the stream that it is no longer being watched. Close() must cause any
+	// outstanding call to Decode() to return with an error of some sort.
+	Close()
+}
+
+// Reporter hides the details of how an error is turned into a runtime.Object for
+// reporting on a watch stream since this package may not import a higher level report.
+type Reporter interface {
+	// AsObject must convert err into a valid runtime.Object for the watch stream.
+	AsObject(err error) runtime.Object
+}
+
+// StreamWatcher turns any stream for which you can write a Decoder interface
+// into a watch.Interface.
+type StreamWatcher struct {
+	sync.Mutex
+	source   Decoder
+	reporter Reporter
+	result   chan Event
+	stopped  bool
+}
+
+// NewStreamWatcher creates a StreamWatcher from the given decoder.
+func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher {
+	sw := &StreamWatcher{
+		source:   d,
+		reporter: r,
+		// It's easy for a consumer to add buffering via an extra
+		// goroutine/channel, but impossible for them to remove it,
+		// so nonbuffered is better.
+		result: make(chan Event),
+	}
+	go sw.receive()
+	return sw
+}
+
+// ResultChan implements Interface.
+func (sw *StreamWatcher) ResultChan() <-chan Event {
+	return sw.result
+}
+
+// Stop implements Interface.
+func (sw *StreamWatcher) Stop() {
+	// Call Close() exactly once by locking and setting a flag.
+	sw.Lock()
+	defer sw.Unlock()
+	if !sw.stopped {
+		sw.stopped = true
+		sw.source.Close()
+	}
+}
+
+// stopping returns true if Stop() was called previously.
+func (sw *StreamWatcher) stopping() bool {
+	sw.Lock()
+	defer sw.Unlock()
+	return sw.stopped
+}
+
+// receive reads result from the decoder in a loop and sends down the result channel.
+func (sw *StreamWatcher) receive() {
+	defer close(sw.result)
+	defer sw.Stop()
+	defer utilruntime.HandleCrash()
+	for {
+		action, obj, err := sw.source.Decode()
+		if err != nil {
+			// Ignore expected error.
+			if sw.stopping() {
+				return
+			}
+			switch err {
+			case io.EOF:
+				// watch closed normally
+			case io.ErrUnexpectedEOF:
+				klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
+			default:
+				if net.IsProbableEOF(err) || net.IsTimeout(err) {
+					klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
+				} else {
+					sw.result <- Event{
+						Type:   Error,
+						Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)),
+					}
+				}
+			}
+			return
+		}
+		sw.result <- Event{
+			Type:   action,
+			Object: obj,
+		}
+	}
+}
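A minimal sketch of feeding NewStreamWatcher a Decoder and Reporter; sliceDecoder and errorReporter below are made-up helpers for illustration, not types from this package:

package main

import (
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
)

// sliceDecoder replays a fixed list of events and then reports io.EOF,
// which StreamWatcher treats as a normal end of the stream.
type sliceDecoder struct {
	events []watch.Event
}

func (d *sliceDecoder) Decode() (watch.EventType, runtime.Object, error) {
	if len(d.events) == 0 {
		return "", nil, io.EOF
	}
	ev := d.events[0]
	d.events = d.events[1:]
	return ev.Type, ev.Object, nil
}

func (d *sliceDecoder) Close() {}

// errorReporter satisfies Reporter; it is only consulted for unexpected
// decode errors, which this toy decoder never produces.
type errorReporter struct{}

func (errorReporter) AsObject(err error) runtime.Object { return nil }

func main() {
	d := &sliceDecoder{events: []watch.Event{{Type: watch.Added}, {Type: watch.Deleted}}}
	sw := watch.NewStreamWatcher(d, errorReporter{})
	for ev := range sw.ResultChan() {
		fmt.Println("event:", ev.Type)
	}
}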
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
new file mode 100644
index 0000000..1f4911a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Interface can be implemented by anything that knows how to watch and report changes.
+type Interface interface {
+	// Stops watching. Will close the channel returned by ResultChan(). Releases
+	// any resources used by the watch.
+	Stop()
+
+	// Returns a chan which will receive all the events. If an error occurs
+	// or Stop() is called, the implementation will close this channel and
+	// release any resources used by the watch.
+	ResultChan() <-chan Event
+}
+
+// EventType defines the possible types of events.
+type EventType string
+
+const (
+	Added    EventType = "ADDED"
+	Modified EventType = "MODIFIED"
+	Deleted  EventType = "DELETED"
+	Bookmark EventType = "BOOKMARK"
+	Error    EventType = "ERROR"
+)
+
+var (
+	DefaultChanSize int32 = 100
+)
+
+// Event represents a single event to a watched resource.
+// +k8s:deepcopy-gen=true
+type Event struct {
+	Type EventType
+
+	// Object is:
+	//  * If Type is Added or Modified: the new state of the object.
+	//  * If Type is Deleted: the state of the object immediately before deletion.
+	//  * If Type is Bookmark: the object (instance of a type being watched) where
+	//    only ResourceVersion field is set. On successful restart of watch from a
+	//    bookmark resourceVersion, client is guaranteed to not get repeat event
+	//    nor miss any events.
+	//  * If Type is Error: *api.Status is recommended; other types may make sense
+	//    depending on context.
+	Object runtime.Object
+}
+
+type emptyWatch chan Event
+
+// NewEmptyWatch returns a watch interface that returns no results and is closed.
+// May be used in certain error conditions where no information is available but
+// an error is not warranted.
+func NewEmptyWatch() Interface {
+	ch := make(chan Event)
+	close(ch)
+	return emptyWatch(ch)
+}
+
+// Stop implements Interface
+func (w emptyWatch) Stop() {
+}
+
+// ResultChan implements Interface
+func (w emptyWatch) ResultChan() <-chan Event {
+	return chan Event(w)
+}
+
+// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
+type FakeWatcher struct {
+	result  chan Event
+	stopped bool
+	sync.Mutex
+}
+
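+// NewFake creates a FakeWatcher with an unbuffered result channel.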
+func NewFake() *FakeWatcher {
+	return &FakeWatcher{
+		result: make(chan Event),
+	}
+}
+
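+// NewFakeWithChanSize creates a FakeWatcher whose result channel is buffered to
+// size events; the blocking argument is not used by this implementation.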
+func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
+	return &FakeWatcher{
+		result: make(chan Event, size),
+	}
+}
+
+// Stop implements Interface.Stop().
+func (f *FakeWatcher) Stop() {
+	f.Lock()
+	defer f.Unlock()
+	if !f.stopped {
+		klog.V(4).Infof("Stopping fake watcher.")
+		close(f.result)
+		f.stopped = true
+	}
+}
+
+func (f *FakeWatcher) IsStopped() bool {
+	f.Lock()
+	defer f.Unlock()
+	return f.stopped
+}
+
+// Reset prepares the watcher to be reused.
+func (f *FakeWatcher) Reset() {
+	f.Lock()
+	defer f.Unlock()
+	f.stopped = false
+	f.result = make(chan Event)
+}
+
+func (f *FakeWatcher) ResultChan() <-chan Event {
+	return f.result
+}
+
+// Add sends an add event.
+func (f *FakeWatcher) Add(obj runtime.Object) {
+	f.result <- Event{Added, obj}
+}
+
+// Modify sends a modify event.
+func (f *FakeWatcher) Modify(obj runtime.Object) {
+	f.result <- Event{Modified, obj}
+}
+
+// Delete sends a delete event.
+func (f *FakeWatcher) Delete(lastValue runtime.Object) {
+	f.result <- Event{Deleted, lastValue}
+}
+
+// Error sends an Error event.
+func (f *FakeWatcher) Error(errValue runtime.Object) {
+	f.result <- Event{Error, errValue}
+}
+
+// Action sends an event of the requested type, for table-based testing.
+func (f *FakeWatcher) Action(action EventType, obj runtime.Object) {
+	f.result <- Event{action, obj}
+}
+
+// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
+type RaceFreeFakeWatcher struct {
+	result  chan Event
+	Stopped bool
+	sync.Mutex
+}
+
+func NewRaceFreeFake() *RaceFreeFakeWatcher {
+	return &RaceFreeFakeWatcher{
+		result: make(chan Event, DefaultChanSize),
+	}
+}
+
+// Stop implements Interface.Stop().
+func (f *RaceFreeFakeWatcher) Stop() {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		klog.V(4).Infof("Stopping fake watcher.")
+		close(f.result)
+		f.Stopped = true
+	}
+}
+
+func (f *RaceFreeFakeWatcher) IsStopped() bool {
+	f.Lock()
+	defer f.Unlock()
+	return f.Stopped
+}
+
+// Reset prepares the watcher to be reused.
+func (f *RaceFreeFakeWatcher) Reset() {
+	f.Lock()
+	defer f.Unlock()
+	f.Stopped = false
+	f.result = make(chan Event, DefaultChanSize)
+}
+
+func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event {
+	f.Lock()
+	defer f.Unlock()
+	return f.result
+}
+
+// Add sends an add event.
+func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Added, obj}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Modify sends a modify event.
+func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Modified, obj}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Delete sends a delete event.
+func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Deleted, lastValue}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Error sends an Error event.
+func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{Error, errValue}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// Action sends an event of the requested type, for table-based testing.
+func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) {
+	f.Lock()
+	defer f.Unlock()
+	if !f.Stopped {
+		select {
+		case f.result <- Event{action, obj}:
+			return
+		default:
+			panic(fmt.Errorf("channel full"))
+		}
+	}
+}
+
+// ProxyWatcher lets you wrap your channel in a watch.Interface. Threadsafe.
+type ProxyWatcher struct {
+	result chan Event
+	stopCh chan struct{}
+
+	mutex   sync.Mutex
+	stopped bool
+}
+
+var _ Interface = &ProxyWatcher{}
+
+// NewProxyWatcher creates a new ProxyWatcher by wrapping a channel.
+func NewProxyWatcher(ch chan Event) *ProxyWatcher {
+	return &ProxyWatcher{
+		result:  ch,
+		stopCh:  make(chan struct{}),
+		stopped: false,
+	}
+}
+
+// Stop implements Interface
+func (pw *ProxyWatcher) Stop() {
+	pw.mutex.Lock()
+	defer pw.mutex.Unlock()
+	if !pw.stopped {
+		pw.stopped = true
+		close(pw.stopCh)
+	}
+}
+
+// Stopping returns true if Stop() has been called
+func (pw *ProxyWatcher) Stopping() bool {
+	pw.mutex.Lock()
+	defer pw.mutex.Unlock()
+	return pw.stopped
+}
+
+// ResultChan implements Interface
+func (pw *ProxyWatcher) ResultChan() <-chan Event {
+	return pw.result
+}
+
+// StopChan returns the stop channel.
+func (pw *ProxyWatcher) StopChan() <-chan struct{} {
+	return pw.stopCh
+}
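A minimal sketch of ProxyWatcher, which signals shutdown only via StopChan() and never closes the wrapped channel itself; the buffered channel and the Bookmark event are illustrative assumptions:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	ch := make(chan watch.Event, 1)
	pw := watch.NewProxyWatcher(ch)

	ch <- watch.Event{Type: watch.Bookmark}
	fmt.Println("event:", (<-pw.ResultChan()).Type)

	pw.Stop()
	fmt.Println("stopping:", pw.Stopping())

	// The sender is expected to watch StopChan() and close ch when it fires;
	// ProxyWatcher itself never closes the wrapped channel.
	select {
	case <-pw.StopChan():
		close(ch)
	default:
	}
}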
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
new file mode 100644
index 0000000..71ef4da
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
@@ -0,0 +1,40 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package watch
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	if in.Object != nil {
+		out.Object = in.Object.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
new file mode 100644
index 0000000..6be8034
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
@@ -0,0 +1,388 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package reflect is a fork of go's standard library reflection package, which
+// allows for deep equal with equality functions defined.
+package reflect
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// Equalities is a map from type to a function comparing two values of
+// that type.
+type Equalities map[reflect.Type]reflect.Value
+
+// EqualitiesOrDie creates an Equalities from the given funcs, panicking on error; provided for convenience.
+func EqualitiesOrDie(funcs ...interface{}) Equalities {
+	e := Equalities{}
+	if err := e.AddFuncs(funcs...); err != nil {
+		panic(err)
+	}
+	return e
+}
+
+// AddFuncs is a shortcut for multiple calls to AddFunc.
+func (e Equalities) AddFuncs(funcs ...interface{}) error {
+	for _, f := range funcs {
+		if err := e.AddFunc(f); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddFunc uses func as an equality function: it must take
+// two parameters of the same type, and return a boolean.
+func (e Equalities) AddFunc(eqFunc interface{}) error {
+	fv := reflect.ValueOf(eqFunc)
+	ft := fv.Type()
+	if ft.Kind() != reflect.Func {
+		return fmt.Errorf("expected func, got: %v", ft)
+	}
+	if ft.NumIn() != 2 {
+		return fmt.Errorf("expected two 'in' params, got: %v", ft)
+	}
+	if ft.NumOut() != 1 {
+		return fmt.Errorf("expected one 'out' param, got: %v", ft)
+	}
+	if ft.In(0) != ft.In(1) {
+		return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft)
+	}
+	var forReturnType bool
+	boolType := reflect.TypeOf(forReturnType)
+	if ft.Out(0) != boolType {
+		return fmt.Errorf("expected bool return, got: %v", ft)
+	}
+	e[ft.In(0)] = fv
+	return nil
+}
+
+// Below here is forked from go's reflect/deepequal.go
+
+// During deepValueEqual, must keep track of checks that are
+// in progress.  The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited comparisons are stored in a map indexed by visit.
+type visit struct {
+	a1  uintptr
+	a2  uintptr
+	typ reflect.Type
+}
+
+// unexportedTypePanic is thrown when you use this DeepEqual on something that has an
+// unexported type. It indicates a programmer error, so should not occur at runtime,
+// which is why it's not public and thus impossible to catch.
+type unexportedTypePanic []reflect.Type
+
+func (u unexportedTypePanic) Error() string { return u.String() }
+func (u unexportedTypePanic) String() string {
+	strs := make([]string, len(u))
+	for i, t := range u {
+		strs[i] = fmt.Sprintf("%v", t)
+	}
+	return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ")
+}
+
+func makeUsefulPanic(v reflect.Value) {
+	if x := recover(); x != nil {
+		if u, ok := x.(unexportedTypePanic); ok {
+			u = append(unexportedTypePanic{v.Type()}, u...)
+			x = u
+		}
+		panic(x)
+	}
+}
+
+// Tests for deep equality using reflected types. The map argument tracks
+// comparisons that have already been seen, which allows short circuiting on
+// recursive types.
+func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+	defer makeUsefulPanic(v1)
+
+	if !v1.IsValid() || !v2.IsValid() {
+		return v1.IsValid() == v2.IsValid()
+	}
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if fv, ok := e[v1.Type()]; ok {
+		return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+	}
+
+	hard := func(k reflect.Kind) bool {
+		switch k {
+		case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+			return true
+		}
+		return false
+	}
+
+	if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+		addr1 := v1.UnsafeAddr()
+		addr2 := v2.UnsafeAddr()
+		if addr1 > addr2 {
+			// Canonicalize order to reduce number of entries in visited.
+			addr1, addr2 = addr2, addr1
+		}
+
+		// Short circuit if references are identical ...
+		if addr1 == addr2 {
+			return true
+		}
+
+		// ... or already seen
+		typ := v1.Type()
+		v := visit{addr1, addr2, typ}
+		if visited[v] {
+			return true
+		}
+
+		// Remember for later.
+		visited[v] = true
+	}
+
+	switch v1.Kind() {
+	case reflect.Array:
+		// We don't need to check length here because length is part of
+		// an array's type, which has already been filtered for.
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Slice:
+		if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+			return false
+		}
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Interface:
+		if v1.IsNil() || v2.IsNil() {
+			return v1.IsNil() == v2.IsNil()
+		}
+		return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Ptr:
+		return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Struct:
+		for i, n := 0, v1.NumField(); i < n; i++ {
+			if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Map:
+		if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) {
+			return false
+		}
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for _, k := range v1.MapKeys() {
+			if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Func:
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		// Can't do better than this:
+		return false
+	default:
+		// Normal equality suffices
+		if !v1.CanInterface() || !v2.CanInterface() {
+			panic(unexportedTypePanic{})
+		}
+		return v1.Interface() == v2.Interface()
+	}
+}
+
+// DeepEqual is like reflect.DeepEqual, but focused on semantic equality
+// instead of memory equality.
+//
+// It will use e's equality functions if it finds types that match.
+//
+// An empty slice *is* equal to a nil slice for our purposes; same for maps.
+//
+// Unexported field members cannot be compared and will cause an informative panic; you must add an Equality
+// function for these types.
+func (e Equalities) DeepEqual(a1, a2 interface{}) bool {
+	if a1 == nil || a2 == nil {
+		return a1 == a2
+	}
+	v1 := reflect.ValueOf(a1)
+	v2 := reflect.ValueOf(a2)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	return e.deepValueEqual(v1, v2, make(map[visit]bool), 0)
+}
+
+func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool {
+	defer makeUsefulPanic(v1)
+
+	if !v1.IsValid() || !v2.IsValid() {
+		return v1.IsValid() == v2.IsValid()
+	}
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if fv, ok := e[v1.Type()]; ok {
+		return fv.Call([]reflect.Value{v1, v2})[0].Bool()
+	}
+
+	hard := func(k reflect.Kind) bool {
+		switch k {
+		case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct:
+			return true
+		}
+		return false
+	}
+
+	if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) {
+		addr1 := v1.UnsafeAddr()
+		addr2 := v2.UnsafeAddr()
+		if addr1 > addr2 {
+			// Canonicalize order to reduce number of entries in visited.
+			addr1, addr2 = addr2, addr1
+		}
+
+		// Short circuit if references are identical ...
+		if addr1 == addr2 {
+			return true
+		}
+
+		// ... or already seen
+		typ := v1.Type()
+		v := visit{addr1, addr2, typ}
+		if visited[v] {
+			return true
+		}
+
+		// Remember for later.
+		visited[v] = true
+	}
+
+	switch v1.Kind() {
+	case reflect.Array:
+		// We don't need to check length here because length is part of
+		// an array's type, which has already been filtered for.
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Slice:
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() > v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		if v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() > v2.Len() {
+			return false
+		}
+		return v1.String() == v2.String()
+	case reflect.Interface:
+		if v1.IsNil() {
+			return true
+		}
+		return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Ptr:
+		if v1.IsNil() {
+			return true
+		}
+		return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1)
+	case reflect.Struct:
+		for i, n := 0, v1.NumField(); i < n; i++ {
+			if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Map:
+		if v1.IsNil() || v1.Len() == 0 {
+			return true
+		}
+		if v1.Len() > v2.Len() {
+			return false
+		}
+		if v1.Pointer() == v2.Pointer() {
+			return true
+		}
+		for _, k := range v1.MapKeys() {
+			if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) {
+				return false
+			}
+		}
+		return true
+	case reflect.Func:
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		// Can't do better than this:
+		return false
+	default:
+		// Normal equality suffices
+		if !v1.CanInterface() || !v2.CanInterface() {
+			panic(unexportedTypePanic{})
+		}
+		return v1.Interface() == v2.Interface()
+	}
+}
+
+// DeepDerivative is similar to DeepEqual except that unset fields in a1 are
+// ignored (not compared). This allows us to focus on the fields that matter to
+// the semantic comparison.
+//
+// The unset fields include a nil pointer and an empty string.
+func (e Equalities) DeepDerivative(a1, a2 interface{}) bool {
+	if a1 == nil {
+		return true
+	}
+	v1 := reflect.ValueOf(a1)
+	v2 := reflect.ValueOf(a2)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	return e.deepValueDerive(v1, v2, make(map[visit]bool), 0)
+}
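A minimal sketch of the semantic comparison provided by Equalities; the Spec type and the *int equality rule are assumptions invented for the example:

package main

import (
	"fmt"

	forkedreflect "k8s.io/apimachinery/third_party/forked/golang/reflect"
)

type Spec struct {
	Name     string
	Replicas *int
	Labels   map[string]string
}

func main() {
	e := forkedreflect.EqualitiesOrDie(
		// Custom equality for *int: a nil pointer matches anything.
		func(a, b *int) bool { return a == nil || b == nil || *a == *b },
	)

	three := 3
	left := Spec{Name: "demo", Labels: map[string]string{}}
	right := Spec{Name: "demo", Replicas: &three, Labels: nil}

	// Semantic equality: an empty map equals a nil map, and the *int rule above applies.
	fmt.Println(e.DeepEqual(left, right)) // true

	// DeepDerivative additionally ignores fields that are unset on the left-hand side.
	fmt.Println(e.DeepDerivative(Spec{Name: "demo"}, right)) // true
}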