[VOL-3678] First implementation of the BBSim-sadis-server

Change-Id: I5077a8f861f4cc6af9759f31a4a415042c05eba3
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
new file mode 100644
index 0000000..f21b0ef
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"mime"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+)
+
+// serializerExtensions are for serializers that are conditionally compiled in
+var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
+
+type serializerType struct {
+	AcceptContentTypes []string
+	ContentType        string
+	FileExtensions     []string
+	// EncodesAsText should be true if this content type can be represented safely in UTF-8
+	EncodesAsText bool
+
+	Serializer       runtime.Serializer
+	PrettySerializer runtime.Serializer
+
+	AcceptStreamContentTypes []string
+	StreamContentType        string
+
+	Framer           runtime.Framer
+	StreamSerializer runtime.Serializer
+}
+
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
+	jsonSerializer := json.NewSerializerWithOptions(
+		mf, scheme, scheme,
+		json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
+	)
+	jsonSerializerType := serializerType{
+		AcceptContentTypes: []string{runtime.ContentTypeJSON},
+		ContentType:        runtime.ContentTypeJSON,
+		FileExtensions:     []string{"json"},
+		EncodesAsText:      true,
+		Serializer:         jsonSerializer,
+
+		Framer:           json.Framer,
+		StreamSerializer: jsonSerializer,
+	}
+	if options.Pretty {
+		jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
+			mf, scheme, scheme,
+			json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict},
+		)
+	}
+
+	yamlSerializer := json.NewSerializerWithOptions(
+		mf, scheme, scheme,
+		json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
+	)
+	protoSerializer := protobuf.NewSerializer(scheme, scheme)
+	protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
+
+	serializers := []serializerType{
+		jsonSerializerType,
+		{
+			AcceptContentTypes: []string{runtime.ContentTypeYAML},
+			ContentType:        runtime.ContentTypeYAML,
+			FileExtensions:     []string{"yaml"},
+			EncodesAsText:      true,
+			Serializer:         yamlSerializer,
+		},
+		{
+			AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
+			ContentType:        runtime.ContentTypeProtobuf,
+			FileExtensions:     []string{"pb"},
+			Serializer:         protoSerializer,
+
+			Framer:           protobuf.LengthDelimitedFramer,
+			StreamSerializer: protoRawSerializer,
+		},
+	}
+
+	for _, fn := range serializerExtensions {
+		if serializer, ok := fn(scheme); ok {
+			serializers = append(serializers, serializer)
+		}
+	}
+	return serializers
+}
+
+// CodecFactory provides methods for retrieving codecs and serializers for specific
+// versions and content types.
+type CodecFactory struct {
+	scheme      *runtime.Scheme
+	serializers []serializerType
+	universal   runtime.Decoder
+	accepts     []runtime.SerializerInfo
+
+	legacySerializer runtime.Serializer
+}
+
+// CodecFactoryOptions holds the options for configuring CodecFactory behavior
+type CodecFactoryOptions struct {
+	// Strict configures all serializers in strict mode
+	Strict bool
+	// Pretty includes a pretty serializer along with the non-pretty one
+	Pretty bool
+}
+
+// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
+// Functions implementing this type can be passed to the NewCodecFactory() constructor.
+type CodecFactoryOptionsMutator func(*CodecFactoryOptions)
+
+// EnablePretty enables including a pretty serializer along with the non-pretty one
+func EnablePretty(options *CodecFactoryOptions) {
+	options.Pretty = true
+}
+
+// DisablePretty disables including a pretty serializer along with the non-pretty one
+func DisablePretty(options *CodecFactoryOptions) {
+	options.Pretty = false
+}
+
+// EnableStrict enables configuring all serializers in strict mode
+func EnableStrict(options *CodecFactoryOptions) {
+	options.Strict = true
+}
+
+// DisableStrict disables configuring all serializers in strict mode
+func DisableStrict(options *CodecFactoryOptions) {
+	options.Strict = false
+}
+
+// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
+// and conversion wrappers to define preferred internal and external versions. In the future,
+// as the internal version is used less, callers may instead use a defaulting serializer and
+// only convert objects which are shared internally (Status, common API machinery).
+//
+// Mutators can be passed to change the CodecFactoryOptions before construction of the factory.
+// It is recommended to explicitly pass mutators instead of relying on defaults.
+// By default, Pretty is enabled -- this is conformant with previously supported behavior.
+//
+// TODO: allow other codecs to be compiled in?
+// TODO: accept a scheme interface
+func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory {
+	options := CodecFactoryOptions{Pretty: true}
+	for _, fn := range mutators {
+		fn(&options)
+	}
+
+	serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options)
+	return newCodecFactory(scheme, serializers)
+}
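+
+// exampleNewCodecFactory is an illustrative sketch (not part of the upstream
+// file) of the recommended construction above: option mutators are passed
+// explicitly instead of relying on the defaults.
+func exampleNewCodecFactory(scheme *runtime.Scheme) CodecFactory {
+	// Strict decoding without the pretty serializer; mutators run in order.
+	return NewCodecFactory(scheme, EnableStrict, DisablePretty)
+}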
+
+// newCodecFactory is a helper for testing that allows a different metafactory to be specified.
+func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+	decoders := make([]runtime.Decoder, 0, len(serializers))
+	var accepts []runtime.SerializerInfo
+	alreadyAccepted := make(map[string]struct{})
+
+	var legacySerializer runtime.Serializer
+	for _, d := range serializers {
+		decoders = append(decoders, d.Serializer)
+		for _, mediaType := range d.AcceptContentTypes {
+			if _, ok := alreadyAccepted[mediaType]; ok {
+				continue
+			}
+			alreadyAccepted[mediaType] = struct{}{}
+			info := runtime.SerializerInfo{
+				MediaType:        d.ContentType,
+				EncodesAsText:    d.EncodesAsText,
+				Serializer:       d.Serializer,
+				PrettySerializer: d.PrettySerializer,
+			}
+
+			mediaType, _, err := mime.ParseMediaType(info.MediaType)
+			if err != nil {
+				panic(err)
+			}
+			parts := strings.SplitN(mediaType, "/", 2)
+			info.MediaTypeType = parts[0]
+			info.MediaTypeSubType = parts[1]
+
+			if d.StreamSerializer != nil {
+				info.StreamSerializer = &runtime.StreamSerializerInfo{
+					Serializer:    d.StreamSerializer,
+					EncodesAsText: d.EncodesAsText,
+					Framer:        d.Framer,
+				}
+			}
+			accepts = append(accepts, info)
+			if mediaType == runtime.ContentTypeJSON {
+				legacySerializer = d.Serializer
+			}
+		}
+	}
+	if legacySerializer == nil {
+		legacySerializer = serializers[0].Serializer
+	}
+
+	return CodecFactory{
+		scheme:      scheme,
+		serializers: serializers,
+		universal:   recognizer.NewDecoder(decoders...),
+
+		accepts: accepts,
+
+		legacySerializer: legacySerializer,
+	}
+}
+
+// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the
+// caller requests it.
+func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer {
+	return WithoutConversionCodecFactory{f}
+}
+
+// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
+func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
+	return f.accepts
+}
+
+// LegacyCodec encodes output to the given API versions, and decodes output into the internal form from
+// any recognized source. The returned codec will always encode output to JSON. If a type is not
+// found in the list of versions an error will be returned.
+//
+// This method is deprecated - clients and servers should negotiate a serializer by mime-type and
+// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
+//
+// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
+//   All other callers will be forced to request a Codec directly.
+func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
+	return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
+}
+
+// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies
+// runtime.Object. It does not perform conversion. It does not perform defaulting.
+func (f CodecFactory) UniversalDeserializer() runtime.Decoder {
+	return f.universal
+}
+
+// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used
+// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes
+// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate
+// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified,
+// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs
+// defaulting.
+//
+// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form
+// TODO: only accept a group versioner
+func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder {
+	var versioner runtime.GroupVersioner
+	if len(versions) == 0 {
+		versioner = runtime.InternalGroupVersioner
+	} else {
+		versioner = schema.GroupVersions(versions)
+	}
+	return f.CodecForVersions(nil, f.universal, nil, versioner)
+}
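+
+// exampleUniversalDecode is an illustrative sketch (not part of the upstream
+// file) contrasting the two decoders described above.
+func exampleUniversalDecode(f CodecFactory, data []byte) error {
+	// Decode exactly as stored: no conversion, no defaulting.
+	if _, _, err := f.UniversalDeserializer().Decode(data, nil, nil); err != nil {
+		return err
+	}
+	// Decode, default, and convert to the internal version (the default when
+	// no GroupVersions are passed).
+	_, _, err := f.UniversalDecoder().Decode(data, nil, nil)
+	return err
+}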
+
+// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list,
+// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not
+// converted. If encode or decode are nil, no conversion is performed.
+func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec {
+	// TODO: these are for backcompat, remove them in the future
+	if encode == nil {
+		encode = runtime.DisabledGroupVersioner
+	}
+	if decode == nil {
+		decode = runtime.InternalGroupVersioner
+	}
+	return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode)
+}
+
+// DecoderToVersion returns a decoder that targets the provided group version.
+func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
+	return f.CodecForVersions(nil, decoder, nil, gv)
+}
+
+// EncoderForVersion returns an encoder that targets the provided group version.
+func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
+	return f.CodecForVersions(encoder, nil, gv, nil)
+}
+
+// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion.
+// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future
+// will be unnecessary when we change the signature of NegotiatedSerializer.
+type WithoutConversionCodecFactory struct {
+	CodecFactory
+}
+
+// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object
+// when serialized.
+func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
+	return runtime.WithVersionEncoder{
+		Version:     version,
+		Encoder:     serializer,
+		ObjectTyper: f.CodecFactory.scheme,
+	}
+}
+
+// DecoderToVersion returns a decoder that does not do conversion.
+func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+	return runtime.WithoutVersionDecoder{
+		Decoder: serializer,
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
new file mode 100644
index 0000000..e081d7f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -0,0 +1,388 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"encoding/json"
+	"io"
+	"strconv"
+	"unsafe"
+
+	jsoniter "github.com/json-iterator/go"
+	"github.com/modern-go/reflect2"
+	"sigs.k8s.io/yaml"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/util/framer"
+	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/klog/v2"
+)
+
+// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
+// is not nil, the object has the group, version, and kind fields set.
+// Deprecated: use NewSerializerWithOptions instead.
+func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
+	return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
+}
+
+// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
+// matches JSON, and will error if constructs are used that do not serialize to JSON.
+// Deprecated: use NewSerializerWithOptions instead.
+func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+	return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
+}
+
+// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
+// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer
+// and are immutable.
+func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
+	return &Serializer{
+		meta:       meta,
+		creater:    creater,
+		typer:      typer,
+		options:    options,
+		identifier: identifier(options),
+	}
+}
+
+// identifier computes Identifier of Encoder based on the given options.
+func identifier(options SerializerOptions) runtime.Identifier {
+	result := map[string]string{
+		"name":   "json",
+		"yaml":   strconv.FormatBool(options.Yaml),
+		"pretty": strconv.FormatBool(options.Pretty),
+	}
+	identifier, err := json.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err)
+	}
+	return runtime.Identifier(identifier)
+}
+
+// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
+// example:
+// (1) To configure a JSON serializer, set `Yaml` to `false`.
+// (2) To configure a YAML serializer, set `Yaml` to `true`.
+// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`.
+type SerializerOptions struct {
+	// Yaml: configures the Serializer to work with JSON(false) or YAML(true).
+	// When `Yaml` is enabled, this serializer only supports the subset of YAML that
+	// matches JSON, and will error if constructs are used that do not serialize to JSON.
+	Yaml bool
+
+	// Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output.
+	// This option is silently ignored when `Yaml` is `true`.
+	Pretty bool
+
+	// Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML.
+	// Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
+	Strict bool
+}
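+
+// exampleStrictYAMLSerializer is an illustrative sketch (not part of the
+// upstream file) showing how the options above combine; creater and typer are
+// typically the same *runtime.Scheme.
+func exampleStrictYAMLSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+	return NewSerializerWithOptions(DefaultMetaFactory, creater, typer,
+		SerializerOptions{Yaml: true, Pretty: false, Strict: true})
+}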
+
+type Serializer struct {
+	meta    MetaFactory
+	options SerializerOptions
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+
+	identifier runtime.Identifier
+}
+
+// Serializer implements runtime.Serializer and recognizer.RecognizingDecoder.
+var _ runtime.Serializer = &Serializer{}
+var _ recognizer.RecognizingDecoder = &Serializer{}
+
+type customNumberExtension struct {
+	jsoniter.DummyExtension
+}
+
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+	if typ.String() == "interface {}" {
+		return customNumberDecoder{}
+	}
+	return nil
+}
+
+type customNumberDecoder struct {
+}
+
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+	switch iter.WhatIsNext() {
+	case jsoniter.NumberValue:
+		var number jsoniter.Number
+		iter.ReadVal(&number)
+		i64, err := strconv.ParseInt(string(number), 10, 64)
+		if err == nil {
+			*(*interface{})(ptr) = i64
+			return
+		}
+		f64, err := strconv.ParseFloat(string(number), 64)
+		if err == nil {
+			*(*interface{})(ptr) = f64
+			return
+		}
+		iter.ReportError("DecodeNumber", err.Error())
+	default:
+		*(*interface{})(ptr) = iter.Read()
+	}
+}
+
+// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive when unmarshalling, and otherwise compatible with
+// the encoding/json standard library.
+func CaseSensitiveJsonIterator() jsoniter.API {
+	config := jsoniter.Config{
+		EscapeHTML:             true,
+		SortMapKeys:            true,
+		ValidateJsonRawMessage: true,
+		CaseSensitive:          true,
+	}.Froze()
+	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
+	config.RegisterExtension(&customNumberExtension{})
+	return config
+}
+
+// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
+// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
+// the encoding/json standard library.
+func StrictCaseSensitiveJsonIterator() jsoniter.API {
+	config := jsoniter.Config{
+		EscapeHTML:             true,
+		SortMapKeys:            true,
+		ValidateJsonRawMessage: true,
+		CaseSensitive:          true,
+		DisallowUnknownFields:  true,
+	}.Froze()
+	// Force jsoniter to decode number to interface{} via int64/float64, if possible.
+	config.RegisterExtension(&customNumberExtension{})
+	return config
+}
+
+// Private copies of jsoniter to try to shield against possible mutations
+// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
+// in some other library will mess with every usage of the jsoniter library in the whole program.
+// See https://github.com/json-iterator/go/issues/265
+var caseSensitiveJsonIterator = CaseSensitiveJsonIterator()
+var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator()
+
+// gvkWithDefaults returns the group, version, and kind, filling any unset fields from the provided default.
+func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
+	if len(actual.Kind) == 0 {
+		actual.Kind = defaultGVK.Kind
+	}
+	if len(actual.Version) == 0 && len(actual.Group) == 0 {
+		actual.Group = defaultGVK.Group
+		actual.Version = defaultGVK.Version
+	}
+	if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
+		actual.Version = defaultGVK.Version
+	}
+	return actual
+}
+
+// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
+// load that data into an object matching the desired schema kind or the provided into.
+// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed.
+// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling.
+// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk.
+// If into is nil or the data's gvk differs from into's gvk, it will generate a new Object with ObjectCreater.New(gvk).
+// On success or most errors, the method will return the calculated schema kind.
+// The gvk calculation priority is originalData > default gvk > into.
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	data := originalData
+	if s.options.Yaml {
+		altered, err := yaml.YAMLToJSON(data)
+		if err != nil {
+			return nil, nil, err
+		}
+		data = altered
+	}
+
+	actual, err := s.meta.Interpret(data)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if gvk != nil {
+		*actual = gvkWithDefaults(*actual, *gvk)
+	}
+
+	if unk, ok := into.(*runtime.Unknown); ok && unk != nil {
+		unk.Raw = originalData
+		unk.ContentType = runtime.ContentTypeJSON
+		unk.GetObjectKind().SetGroupVersionKind(*actual)
+		return unk, actual, nil
+	}
+
+	if into != nil {
+		_, isUnstructured := into.(runtime.Unstructured)
+		types, _, err := s.typer.ObjectKinds(into)
+		switch {
+		case runtime.IsNotRegisteredError(err), isUnstructured:
+			if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil {
+				return nil, actual, err
+			}
+			return into, actual, nil
+		case err != nil:
+			return nil, actual, err
+		default:
+			*actual = gvkWithDefaults(*actual, types[0])
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr(string(originalData))
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr(string(originalData))
+	}
+
+	// use the target if necessary
+	obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil {
+		return nil, actual, err
+	}
+
+	// If the deserializer is non-strict, return successfully here.
+	if !s.options.Strict {
+		return obj, actual, nil
+	}
+
+	// In strict mode pass the data through the YAMLToJSONStrict converter.
+	// This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
+	// the output would equal the input, unless there is a parsing error such as duplicate fields.
+	// As we know this was successful in the non-strict case, the only error that may be returned here
+	// is because of the newly-added strictness; hence we know we can return the typed strictDecodingError:
+	// the actual error is that the object contains duplicate fields.
+	altered, err := yaml.YAMLToJSONStrict(originalData)
+	if err != nil {
+		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
+	}
+	// As performance is not an issue for now for the strict deserializer (the unmarshal has to be done
+	// twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
+	// fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is
+	// due to a field that doesn't exist in the object; hence we can return a typed strictDecodingError:
+	// the actual error is that the object contains an unknown field.
+	strictObj := obj.DeepCopyObject()
+	if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil {
+		return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
+	}
+	// Always return the same object as the non-strict serializer to avoid any deviations.
+	return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
+	if s.options.Yaml {
+		json, err := caseSensitiveJsonIterator.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		data, err := yaml.JSONToYAML(json)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+
+	if s.options.Pretty {
+		data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", "  ")
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+	encoder := json.NewEncoder(w)
+	return encoder.Encode(obj)
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s *Serializer) Identifier() runtime.Identifier {
+	return s.identifier
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
+	if s.options.Yaml {
+		// we could potentially look for '---'
+		return false, true, nil
+	}
+	_, _, ok = utilyaml.GuessJSONStream(peek, 2048)
+	return ok, false, nil
+}
+
+// Framer is the default JSON framing behavior, with newlines delimiting individual objects.
+var Framer = jsonFramer{}
+
+type jsonFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer {
+	// we can write JSON objects directly to the writer, because they are self-framing
+	return w
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	// we need to extract the JSON chunks of data to pass to Decode()
+	return framer.NewJSONFramedReader(r)
+}
+
+// YAMLFramer is the default YAML framing behavior, with "---" document separators delimiting individual objects.
+var YAMLFramer = yamlFramer{}
+
+type yamlFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer {
+	return yamlFrameWriter{w}
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	// extract the YAML document chunks directly
+	return utilyaml.NewDocumentDecoder(r)
+}
+
+type yamlFrameWriter struct {
+	w io.Writer
+}
+
+// Write separates each document with the YAML document separator (`---` followed by line
+// break). Writers must write well formed YAML documents (include a final line break).
+func (w yamlFrameWriter) Write(data []byte) (n int, err error) {
+	if _, err := w.w.Write([]byte("---\n")); err != nil {
+		return 0, err
+	}
+	return w.w.Write(data)
+}
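+
+// exampleYAMLFrames is an illustrative sketch (not part of the upstream file):
+// each document written through the YAML frame writer is preceded by a "---\n"
+// separator, so two documents ending in newlines come out as
+// "---\ndoc1\n---\ndoc2\n" and can be split apart again by NewDocumentDecoder.
+func exampleYAMLFrames(w io.Writer, docs ...[]byte) error {
+	fw := YAMLFramer.NewFrameWriter(w)
+	for _, doc := range docs {
+		if _, err := fw.Write(doc); err != nil {
+			return err
+		}
+	}
+	return nil
+}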
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
new file mode 100644
index 0000000..df3f5f9
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// MetaFactory is used to store and retrieve the version and kind
+// information for JSON objects in a serializer.
+type MetaFactory interface {
+	// Interpret should return the version and kind of the wire-format of
+	// the object.
+	Interpret(data []byte) (*schema.GroupVersionKind, error)
+}
+
+// DefaultMetaFactory is a default factory for versioning objects in JSON. The object
+// in memory and in the default JSON serialization will use the "kind" and "apiVersion"
+// fields.
+var DefaultMetaFactory = SimpleMetaFactory{}
+
+// SimpleMetaFactory provides default methods for retrieving the type and version of objects
+// that are identified with an "apiVersion" and "kind" fields in their JSON
+// serialization. It may be parameterized with the names of the fields in memory, or an
+// optional list of base structs to search for those fields in memory.
+type SimpleMetaFactory struct {
+}
+
+// Interpret will return the APIVersion and Kind of the JSON wire-format
+// encoding of an object, or an error.
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+	findKind := struct {
+		// +optional
+		APIVersion string `json:"apiVersion,omitempty"`
+		// +optional
+		Kind string `json:"kind,omitempty"`
+	}{}
+	if err := json.Unmarshal(data, &findKind); err != nil {
+		return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err)
+	}
+	gv, err := schema.ParseGroupVersion(findKind.APIVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil
+}
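+
+// exampleInterpret is an illustrative sketch (not part of the upstream file):
+// for the input below, Interpret returns
+// &GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}.
+func exampleInterpret() (*schema.GroupVersionKind, error) {
+	return DefaultMetaFactory.Interpret([]byte(`{"apiVersion":"apps/v1","kind":"Deployment"}`))
+}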
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
new file mode 100644
index 0000000..a42b4a4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serializer
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// TODO: We should split negotiated serializers that we can change versions on from those we can change
+// serialization formats on
+type negotiatedSerializerWrapper struct {
+	info runtime.SerializerInfo
+}
+
+func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer {
+	return &negotiatedSerializerWrapper{info}
+}
+
+func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo {
+	return []runtime.SerializerInfo{n.info}
+}
+
+func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder {
+	return e
+}
+
+func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder {
+	return d
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
new file mode 100644
index 0000000..72d0ac7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package protobuf provides a Kubernetes serializer for the protobuf format.
+package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
new file mode 100644
index 0000000..f606b7d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -0,0 +1,472 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package protobuf
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+
+	"github.com/gogo/protobuf/proto"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	"k8s.io/apimachinery/pkg/util/framer"
+)
+
+var (
+	// protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All
+	// proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth
+	// byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that
+	// the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2).
+	//
+	// See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message.
+	//
+	// This encoding scheme is experimental, and is subject to change at any time.
+	protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
+)
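+
+// hasProtoPrefix is an illustrative sketch (not part of the upstream file)
+// showing how the magic number above identifies an envelope produced by this
+// serializer: the bytes 'k' '8' 's' 0x00 followed by a proto-encoded
+// runtime.Unknown message.
+func hasProtoPrefix(data []byte) bool {
+	return len(data) >= len(protoEncodingPrefix) &&
+		bytes.Equal(data[:len(protoEncodingPrefix)], protoEncodingPrefix)
+}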
+
+type errNotMarshalable struct {
+	t reflect.Type
+}
+
+func (e errNotMarshalable) Error() string {
+	return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t)
+}
+
+func (e errNotMarshalable) Status() metav1.Status {
+	return metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusNotAcceptable,
+		Reason:  metav1.StatusReason("NotAcceptable"),
+		Message: e.Error(),
+	}
+}
+
+func IsNotMarshalable(err error) bool {
+	_, ok := err.(errNotMarshalable)
+	return err != nil && ok
+}
+
+// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
+// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
+// as-is (any type info passed with the object will be used).
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
+	return &Serializer{
+		prefix:  protoEncodingPrefix,
+		creater: creater,
+		typer:   typer,
+	}
+}
+
+type Serializer struct {
+	prefix  []byte
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+}
+
+var _ runtime.Serializer = &Serializer{}
+var _ recognizer.RecognizingDecoder = &Serializer{}
+
+const serializerIdentifier runtime.Identifier = "protobuf"
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	prefixLen := len(s.prefix)
+	switch {
+	case len(originalData) == 0:
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]):
+		return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix)
+	case len(originalData) == prefixLen:
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty body")
+	}
+
+	data := originalData[prefixLen:]
+	unk := runtime.Unknown{}
+	if err := unk.Unmarshal(data); err != nil {
+		return nil, nil, err
+	}
+
+	actual := unk.GroupVersionKind()
+	copyKindDefaults(&actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		*intoUnknown = unk
+		if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
+			intoUnknown.ContentType = runtime.ContentTypeProtobuf
+		}
+		return intoUnknown, &actual, nil
+	}
+
+	if into != nil {
+		types, _, err := s.typer.ObjectKinds(into)
+		switch {
+		case runtime.IsNotRegisteredError(err):
+			pb, ok := into.(proto.Message)
+			if !ok {
+				return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
+			}
+			if err := proto.Unmarshal(unk.Raw, pb); err != nil {
+				return nil, &actual, err
+			}
+			return into, &actual, nil
+		case err != nil:
+			return nil, &actual, err
+		default:
+			copyKindDefaults(&actual, &types[0])
+			// if the result of defaulting did not set a version or group, ensure that at least group is set
+			// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+			// of into is set if there is no better information from the caller or object.
+			if len(actual.Version) == 0 && len(actual.Group) == 0 {
+				actual.Group = types[0].Group
+			}
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta))
+	}
+	if len(actual.Version) == 0 {
+		return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta))
+	}
+
+	return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw)
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
+	prefixSize := uint64(len(s.prefix))
+
+	var unk runtime.Unknown
+	switch t := obj.(type) {
+	case *runtime.Unknown:
+		estimatedSize := prefixSize + uint64(t.Size())
+		data := make([]byte, estimatedSize)
+		i, err := t.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+		copy(data, s.prefix)
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+	default:
+		kind := obj.GetObjectKind().GroupVersionKind()
+		unk = runtime.Unknown{
+			TypeMeta: runtime.TypeMeta{
+				Kind:       kind.Kind,
+				APIVersion: kind.GroupVersion().String(),
+			},
+		}
+	}
+
+	switch t := obj.(type) {
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalToSizedBuffer methods
+		encodedSize := uint64(t.Size())
+		estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
+		data := make([]byte, estimatedSize)
+
+		i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize)
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		unk.Raw = data
+
+		estimatedSize := prefixSize + uint64(unk.Size())
+		data = make([]byte, estimatedSize)
+
+		i, err := unk.MarshalTo(data[prefixSize:])
+		if err != nil {
+			return err
+		}
+
+		copy(data, s.prefix)
+
+		_, err = w.Write(data[:prefixSize+uint64(i)])
+		return err
+
+	default:
+		// TODO: marshal with a different content type and serializer (JSON for third party objects)
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s *Serializer) Identifier() runtime.Identifier {
+	return serializerIdentifier
+}
+
+// RecognizesData implements the RecognizingDecoder interface.
+func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
+	prefix := make([]byte, 4)
+	n, err := peek.Read(prefix)
+	if err != nil {
+		if err == io.EOF {
+			return false, false, nil
+		}
+		return false, false, err
+	}
+	if n != 4 {
+		return false, false, nil
+	}
+	return bytes.Equal(s.prefix, prefix), false, nil
+}
+
+// copyKindDefaults defaults dst to the value in src if dst does not have a value set.
+func copyKindDefaults(dst, src *schema.GroupVersionKind) {
+	if src == nil {
+		return
+	}
+	// apply kind and version defaulting from provided default
+	if len(dst.Kind) == 0 {
+		dst.Kind = src.Kind
+	}
+	if len(dst.Version) == 0 && len(src.Version) > 0 {
+		dst.Group = src.Group
+		dst.Version = src.Version
+	}
+}
+
+// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
+// byte buffers by pre-calculating the size of the final buffer needed.
+type bufferedMarshaller interface {
+	proto.Sizer
+	runtime.ProtobufMarshaller
+}
+
+// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently.
+type bufferedReverseMarshaller interface {
+	proto.Sizer
+	runtime.ProtobufReverseMarshaller
+}
+
+// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
+// object with a nil RawJSON struct and the expected size of the provided buffer. The
+// returned size will not be correct if RawJSON is set on unk.
+func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
+	size := uint64(unk.Size())
+	// protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here),
+	// and the size of the array.
+	size += 1 + 8 + byteSize
+	return size
+}
+
+// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer
+// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the
+// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
+//
+// This encoding scheme is experimental, and is subject to change at any time.
+func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer {
+	return &RawSerializer{
+		creater: creater,
+		typer:   typer,
+	}
+}
+
+// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
+// type).
+type RawSerializer struct {
+	creater runtime.ObjectCreater
+	typer   runtime.ObjectTyper
+}
+
+var _ runtime.Serializer = &RawSerializer{}
+
+const rawSerializerIdentifier runtime.Identifier = "raw-protobuf"
+
+// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
+// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
+// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
+// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is
+// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
+// errors, the method will return the calculated schema kind.
+func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	if into == nil {
+		return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
+	}
+
+	if len(originalData) == 0 {
+		// TODO: treat like decoding {} from JSON with defaulting
+		return nil, nil, fmt.Errorf("empty data")
+	}
+	data := originalData
+
+	actual := &schema.GroupVersionKind{}
+	copyKindDefaults(actual, gvk)
+
+	if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
+		intoUnknown.Raw = data
+		intoUnknown.ContentEncoding = ""
+		intoUnknown.ContentType = runtime.ContentTypeProtobuf
+		intoUnknown.SetGroupVersionKind(*actual)
+		return intoUnknown, actual, nil
+	}
+
+	types, _, err := s.typer.ObjectKinds(into)
+	switch {
+	case runtime.IsNotRegisteredError(err):
+		pb, ok := into.(proto.Message)
+		if !ok {
+			return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
+		}
+		if err := proto.Unmarshal(data, pb); err != nil {
+			return nil, actual, err
+		}
+		return into, actual, nil
+	case err != nil:
+		return nil, actual, err
+	default:
+		copyKindDefaults(actual, &types[0])
+		// if the result of defaulting did not set a version or group, ensure that at least group is set
+		// (copyKindDefaults will not assign Group if version is already set). This guarantees that the group
+		// of into is set if there is no better information from the caller or object.
+		if len(actual.Version) == 0 && len(actual.Group) == 0 {
+			actual.Group = types[0].Group
+		}
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr("<protobuf encoded body - must provide default type>")
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr("<protobuf encoded body - must provide default type>")
+	}
+
+	return unmarshalToObject(s.typer, s.creater, actual, into, data)
+}
+
+// unmarshalToObject is the common code between decode in the raw and normal serializer.
+func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) {
+	// use the target if necessary
+	obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	pb, ok := obj.(proto.Message)
+	if !ok {
+		return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
+	}
+	if err := proto.Unmarshal(data, pb); err != nil {
+		return nil, actual, err
+	}
+	if actual != nil {
+		obj.GetObjectKind().SetGroupVersionKind(*actual)
+	}
+	return obj, actual, nil
+}
+
+// Encode serializes the provided object to the given writer.
+func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(s.Identifier(), s.doEncode, w)
+	}
+	return s.doEncode(obj, w)
+}
+
+func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
+	switch t := obj.(type) {
+	case bufferedReverseMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalToSizedBuffer methods
+		encodedSize := uint64(t.Size())
+		data := make([]byte, encodedSize)
+
+		n, err := t.MarshalToSizedBuffer(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data[:n])
+		return err
+
+	case bufferedMarshaller:
+		// this path performs a single allocation during write but requires the caller to implement
+		// the more efficient Size and MarshalTo methods
+		encodedSize := uint64(t.Size())
+		data := make([]byte, encodedSize)
+
+		n, err := t.MarshalTo(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data[:n])
+		return err
+
+	case proto.Marshaler:
+		// this path performs extra allocations
+		data, err := t.Marshal()
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+
+	default:
+		return errNotMarshalable{reflect.TypeOf(obj)}
+	}
+}
+
+// Identifier implements runtime.Encoder interface.
+func (s *RawSerializer) Identifier() runtime.Identifier {
+	return rawSerializerIdentifier
+}
+
+var LengthDelimitedFramer = lengthDelimitedFramer{}
+
+type lengthDelimitedFramer struct{}
+
+// NewFrameWriter implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer {
+	return framer.NewLengthDelimitedFrameWriter(w)
+}
+
+// NewFrameReader implements stream framing for this serializer
+func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser {
+	return framer.NewLengthDelimitedFrameReader(r)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
new file mode 100644
index 0000000..38497ab
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package recognizer
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type RecognizingDecoder interface {
+	runtime.Decoder
+	// RecognizesData should return true if the input in the provided reader
+	// belongs to this decoder, or an error if the data could not be read or is ambiguous.
+	// Unknown is true if the data could not be determined to match the decoder type.
+	// Decoders should assume that they can read as much of peek as they need (as the caller
+	// provides) and may return unknown if the data provided is not sufficient to make
+	// a determination. When peek returns EOF that may mean the end of the input or the
+	// end of buffered input - recognizers should return the best guess at that time.
+	RecognizesData(peek io.Reader) (ok, unknown bool, err error)
+}
+
+// NewDecoder creates a decoder that will attempt multiple decoders in an order defined
+// by:
+//
+// 1. The decoder implements RecognizingDecoder and identifies the data
+// 2. All other decoders, and any decoder that returned true for unknown.
+//
+// The order passed to the constructor is preserved within those priorities.
+func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder {
+	return &decoder{
+		decoders: decoders,
+	}
+}
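+
+// exampleUniversalDecoder is an illustrative sketch (not part of the upstream
+// file): a universal decoder is typically built from one decoder per wire
+// format; recognizing decoders are consulted first, in the order given, before
+// any decoder that reported the data as unknown.
+func exampleUniversalDecoder(jsonDec, yamlDec, protoDec runtime.Decoder) runtime.Decoder {
+	return NewDecoder(jsonDec, yamlDec, protoDec)
+}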
+
+type decoder struct {
+	decoders []runtime.Decoder
+}
+
+var _ RecognizingDecoder = &decoder{}
+
+func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) {
+	var (
+		lastErr    error
+		anyUnknown bool
+	)
+	data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024)
+	for _, r := range d.decoders {
+		switch t := r.(type) {
+		case RecognizingDecoder:
+			ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data))
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			anyUnknown = anyUnknown || unknown
+			if !ok {
+				continue
+			}
+			return true, false, nil
+		}
+	}
+	return false, anyUnknown, lastErr
+}
+
+func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	var (
+		lastErr error
+		skipped []runtime.Decoder
+	)
+
+	// try recognizers, record any decoders we need to give a chance later
+	for _, r := range d.decoders {
+		switch t := r.(type) {
+		case RecognizingDecoder:
+			buf := bytes.NewBuffer(data)
+			ok, unknown, err := t.RecognizesData(buf)
+			if err != nil {
+				lastErr = err
+				continue
+			}
+			if unknown {
+				skipped = append(skipped, t)
+				continue
+			}
+			if !ok {
+				continue
+			}
+			return r.Decode(data, gvk, into)
+		default:
+			skipped = append(skipped, t)
+		}
+	}
+
+	// try recognizers that returned unknown or didn't recognize their data
+	for _, r := range skipped {
+		out, actual, err := r.Decode(data, gvk, into)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		return out, actual, nil
+	}
+
+	if lastErr == nil {
+		lastErr = fmt.Errorf("no serialization format matched the provided data")
+	}
+	return nil, nil, lastErr
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
new file mode 100644
index 0000000..a60a7c0
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package streaming implements encoder and decoder for streams
+// of runtime.Objects over io.Writer/Readers.
+package streaming
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Encoder is a runtime.Encoder on a stream.
+type Encoder interface {
+	// Encode will write the provided object to the stream or return an error. It obeys the same
+	// contract as runtime.VersionedEncoder.
+	Encode(obj runtime.Object) error
+}
+
+// Decoder is a runtime.Decoder from a stream.
+type Decoder interface {
+	// Decode will return io.EOF when no more objects are available.
+	Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
+	// Close closes the underlying stream.
+	Close() error
+}
+
+// Serializer is a factory for creating encoders and decoders that work over streams.
+type Serializer interface {
+	NewEncoder(w io.Writer) Encoder
+	NewDecoder(r io.ReadCloser) Decoder
+}
+
+type decoder struct {
+	reader    io.ReadCloser
+	decoder   runtime.Decoder
+	buf       []byte
+	maxBytes  int
+	resetRead bool
+}
+
+// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
+// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to read
+// an entire object.
+func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
+	return &decoder{
+		reader:   r,
+		decoder:  d,
+		buf:      make([]byte, 1024),
+		maxBytes: 16 * 1024 * 1024,
+	}
+}
+
+var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
+
+// Decode reads the next object from the stream and decodes it.
+func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	base := 0
+	for {
+		n, err := d.reader.Read(d.buf[base:])
+		if err == io.ErrShortBuffer {
+			if n == 0 {
+				return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
+			}
+			if d.resetRead {
+				continue
+			}
+			// double the buffer size up to maxBytes
+			if len(d.buf) < d.maxBytes {
+				base += n
+				d.buf = append(d.buf, make([]byte, len(d.buf))...)
+				continue
+			}
+			// must read the rest of the frame (until we stop getting ErrShortBuffer)
+			d.resetRead = true
+			base = 0
+			return nil, nil, ErrObjectTooLarge
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if d.resetRead {
+			// now that we have drained the large read, continue
+			d.resetRead = false
+			continue
+		}
+		base += n
+		break
+	}
+	return d.decoder.Decode(d.buf[:base], defaults, into)
+}
+
+func (d *decoder) Close() error {
+	return d.reader.Close()
+}
+
+type encoder struct {
+	writer  io.Writer
+	encoder runtime.Encoder
+	buf     *bytes.Buffer
+}
+
+// NewEncoder returns a new streaming encoder.
+func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
+	return &encoder{
+		writer:  w,
+		encoder: e,
+		buf:     &bytes.Buffer{},
+	}
+}
+
+// Encode writes the provided object to the nested writer.
+func (e *encoder) Encode(obj runtime.Object) error {
+	if err := e.encoder.Encode(obj, e.buf); err != nil {
+		return err
+	}
+	_, err := e.writer.Write(e.buf.Bytes())
+	e.buf.Reset()
+	return err
+}
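
The streaming decoder above starts with a 1 KiB buffer and, each time the frame reader reports io.ErrShortBuffer, doubles the buffer until the frame fits or the 16 MiB ceiling is hit, at which point it returns ErrObjectTooLarge and drains the remainder of the oversized frame before the next Decode. A self-contained toy of that growth loop, using hypothetical frameReader/readFrame names rather than the vendored implementation:

package main

import (
	"fmt"
	"io"
)

// frameReader hands out one fixed frame, reporting io.ErrShortBuffer
// while the caller's buffer is too small to hold the remainder.
type frameReader struct {
	frame []byte
	off   int
}

func (f *frameReader) Read(p []byte) (int, error) {
	if f.off >= len(f.frame) {
		return 0, io.EOF
	}
	n := copy(p, f.frame[f.off:])
	f.off += n
	if f.off < len(f.frame) {
		return n, io.ErrShortBuffer
	}
	return n, nil
}

// readFrame keeps the bytes read so far and doubles its buffer on every
// short read, giving up once the configured maximum is reached.
func readFrame(r io.Reader, max int) ([]byte, error) {
	buf := make([]byte, 4) // deliberately small initial buffer
	base := 0
	for {
		n, err := r.Read(buf[base:])
		if err == io.ErrShortBuffer {
			base += n
			if len(buf) >= max {
				return nil, fmt.Errorf("frame larger than %d bytes", max)
			}
			buf = append(buf, make([]byte, len(buf))...) // double the buffer
			continue
		}
		if err != nil {
			return nil, err
		}
		base += n
		return buf[:base], nil
	}
}

func main() {
	got, err := readFrame(&frameReader{frame: []byte(`{"kind":"Example"}`)}, 64)
	fmt.Printf("%s %v\n", got, err)
}
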
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
new file mode 100644
index 0000000..718c5df
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
@@ -0,0 +1,250 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package versioning
+
+import (
+	"encoding/json"
+	"io"
+	"reflect"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+)
+
+// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
+func NewDefaultingCodecForScheme(
+	// TODO: I should be a scheme interface?
+	scheme *runtime.Scheme,
+	encoder runtime.Encoder,
+	decoder runtime.Decoder,
+	encodeVersion runtime.GroupVersioner,
+	decodeVersion runtime.GroupVersioner,
+) runtime.Codec {
+	return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name())
+}
+
+// NewCodec takes objects in their internal versions and converts them to external versions before
+// serializing them. It assumes the serializer provided to it only deals with external versions.
+// This class is also a serializer, but is generally used with a specific version.
+func NewCodec(
+	encoder runtime.Encoder,
+	decoder runtime.Decoder,
+	convertor runtime.ObjectConvertor,
+	creater runtime.ObjectCreater,
+	typer runtime.ObjectTyper,
+	defaulter runtime.ObjectDefaulter,
+	encodeVersion runtime.GroupVersioner,
+	decodeVersion runtime.GroupVersioner,
+	originalSchemeName string,
+) runtime.Codec {
+	internal := &codec{
+		encoder:   encoder,
+		decoder:   decoder,
+		convertor: convertor,
+		creater:   creater,
+		typer:     typer,
+		defaulter: defaulter,
+
+		encodeVersion: encodeVersion,
+		decodeVersion: decodeVersion,
+
+		identifier: identifier(encodeVersion, encoder),
+
+		originalSchemeName: originalSchemeName,
+	}
+	return internal
+}
+
+type codec struct {
+	encoder   runtime.Encoder
+	decoder   runtime.Decoder
+	convertor runtime.ObjectConvertor
+	creater   runtime.ObjectCreater
+	typer     runtime.ObjectTyper
+	defaulter runtime.ObjectDefaulter
+
+	encodeVersion runtime.GroupVersioner
+	decodeVersion runtime.GroupVersioner
+
+	identifier runtime.Identifier
+
+	// originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
+	originalSchemeName string
+}
+
+var identifiersMap sync.Map
+
+type codecIdentifier struct {
+	EncodeGV string `json:"encodeGV,omitempty"`
+	Encoder  string `json:"encoder,omitempty"`
+	Name     string `json:"name,omitempty"`
+}
+
+// identifier computes Identifier of Encoder based on codec parameters.
+func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier {
+	result := codecIdentifier{
+		Name: "versioning",
+	}
+
+	if encodeGV != nil {
+		result.EncodeGV = encodeGV.Identifier()
+	}
+	if encoder != nil {
+		result.Encoder = string(encoder.Identifier())
+	}
+	if id, ok := identifiersMap.Load(result); ok {
+		return id.(runtime.Identifier)
+	}
+	identifier, err := json.Marshal(result)
+	if err != nil {
+		klog.Fatalf("Failed marshaling identifier for codec: %v", err)
+	}
+	identifiersMap.Store(result, runtime.Identifier(identifier))
+	return runtime.Identifier(identifier)
+}
+
+// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
+// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
+// into that matches the serialized version.
+func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	// If the into object is unstructured and expresses an opinion about its group/version,
+	// create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
+	decodeInto := into
+	if into != nil {
+		if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() {
+			decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object)
+		}
+	}
+
+	obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto)
+	if err != nil {
+		return nil, gvk, err
+	}
+
+	if d, ok := obj.(runtime.NestedObjectDecoder); ok {
+		if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
+			return nil, gvk, err
+		}
+	}
+
+	// if we specify a target, use generic conversion.
+	if into != nil {
+		// perform defaulting if requested
+		if c.defaulter != nil {
+			c.defaulter.Default(obj)
+		}
+
+		// Short-circuit conversion if the into object is same object
+		if into == obj {
+			return into, gvk, nil
+		}
+
+		if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
+			return nil, gvk, err
+		}
+
+		return into, gvk, nil
+	}
+
+	// perform defaulting if requested
+	if c.defaulter != nil {
+		c.defaulter.Default(obj)
+	}
+
+	out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion)
+	if err != nil {
+		return nil, gvk, err
+	}
+	return out, gvk, nil
+}
+
+// Encode ensures the provided object is output in the appropriate group and version, invoking
+// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
+func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
+	if co, ok := obj.(runtime.CacheableObject); ok {
+		return co.CacheEncode(c.Identifier(), c.doEncode, w)
+	}
+	return c.doEncode(obj, w)
+}
+
+func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
+	switch obj := obj.(type) {
+	case *runtime.Unknown:
+		return c.encoder.Encode(obj, w)
+	case runtime.Unstructured:
+		// An unstructured list can contain objects of multiple group version kinds. Don't short-circuit just
+		// because the top-level type matches our desired destination type. Actually send the object to the converter
+		// to give it a chance to convert the list items if needed.
+		if _, ok := obj.(*unstructured.UnstructuredList); !ok {
+			// avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl)
+			objGVK := obj.GetObjectKind().GroupVersionKind()
+			if len(objGVK.Version) == 0 {
+				return c.encoder.Encode(obj, w)
+			}
+			targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK})
+			if !ok {
+				return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion)
+			}
+			if targetGVK == objGVK {
+				return c.encoder.Encode(obj, w)
+			}
+		}
+	}
+
+	gvks, isUnversioned, err := c.typer.ObjectKinds(obj)
+	if err != nil {
+		return err
+	}
+
+	objectKind := obj.GetObjectKind()
+	old := objectKind.GroupVersionKind()
+	// restore the old GVK after encoding
+	defer objectKind.SetGroupVersionKind(old)
+
+	if c.encodeVersion == nil || isUnversioned {
+		if e, ok := obj.(runtime.NestedObjectEncoder); ok {
+			if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+				return err
+			}
+		}
+		objectKind.SetGroupVersionKind(gvks[0])
+		return c.encoder.Encode(obj, w)
+	}
+
+	// Perform a conversion if necessary
+	out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
+	if err != nil {
+		return err
+	}
+
+	if e, ok := out.(runtime.NestedObjectEncoder); ok {
+		if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
+			return err
+		}
+	}
+
+	// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
+	return c.encoder.Encode(out, w)
+}
+
+// Identifier implements runtime.Encoder interface.
+func (c *codec) Identifier() runtime.Identifier {
+	return c.identifier
+}
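
NewCodec, and the NewDefaultingCodecForScheme shorthand above, wraps a plain serializer so that objects are converted to the encode version on the way out and toward the decode version on the way in, with defaulting applied when a defaulter is supplied. A rough construction sketch, assuming k8s.io/api/core/v1 is also vendored and registered into the scheme (that package and the example Pod are illustrative, not part of this change):

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

func main() {
	// Register the external v1 types so the scheme can act as typer,
	// creater, convertor and defaulter for the codec.
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	jsonSerializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Pretty: true},
	)

	// Encode to and decode from the core/v1 group version.
	codec := versioning.NewDefaultingCodecForScheme(
		scheme, jsonSerializer, jsonSerializer,
		corev1.SchemeGroupVersion, corev1.SchemeGroupVersion,
	)

	pod := &corev1.Pod{}
	pod.Name = "example"
	// Encode sets apiVersion/kind for the target version on the way out
	// and restores the original GVK afterwards.
	_ = codec.Encode(pod, os.Stdout)
}
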