[VOL-4291] OfAgent changes for gRPC migration

Change-Id: I8da1db6df49d478ef24ec8f9bd719e9692f48a7f
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
deleted file mode 100644
index 9f55693..0000000
--- a/vendor/gopkg.in/yaml.v2/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-go:
-    - 1.4
-    - 1.5
-    - 1.6
-    - 1.7
-    - 1.8
-    - 1.9
-    - tip
-
-go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/gopkg.in/yaml.v2/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
deleted file mode 100644
index 8da58fb..0000000
--- a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
+++ /dev/null
@@ -1,31 +0,0 @@
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original copyright and license:
-
-    apic.go
-    emitterc.go
-    parserc.go
-    readerc.go
-    scannerc.go
-    writerc.go
-    yamlh.go
-    yamlprivateh.go
-
-Copyright (c) 2006 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
deleted file mode 100644
index 91679b5..0000000
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ /dev/null
@@ -1,788 +0,0 @@
-package yaml
-
-import (
-	"encoding"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"math"
-	"reflect"
-	"strconv"
-	"time"
-)
-
-const (
-	documentNode = 1 << iota
-	mappingNode
-	sequenceNode
-	scalarNode
-	aliasNode
-)
-
-type node struct {
-	kind         int
-	line, column int
-	tag          string
-	// For an alias node, alias holds the resolved alias.
-	alias    *node
-	value    string
-	implicit bool
-	children []*node
-	anchors  map[string]*node
-}
-
-// ----------------------------------------------------------------------------
-// Parser, produces a node tree out of a libyaml event stream.
-
-type parser struct {
-	parser   yaml_parser_t
-	event    yaml_event_t
-	doc      *node
-	doneInit bool
-}
-
-func newParser(b []byte) *parser {
-	p := parser{}
-	if !yaml_parser_initialize(&p.parser) {
-		panic("failed to initialize YAML emitter")
-	}
-	if len(b) == 0 {
-		b = []byte{'\n'}
-	}
-	yaml_parser_set_input_string(&p.parser, b)
-	return &p
-}
-
-func newParserFromReader(r io.Reader) *parser {
-	p := parser{}
-	if !yaml_parser_initialize(&p.parser) {
-		panic("failed to initialize YAML emitter")
-	}
-	yaml_parser_set_input_reader(&p.parser, r)
-	return &p
-}
-
-func (p *parser) init() {
-	if p.doneInit {
-		return
-	}
-	p.expect(yaml_STREAM_START_EVENT)
-	p.doneInit = true
-}
-
-func (p *parser) destroy() {
-	if p.event.typ != yaml_NO_EVENT {
-		yaml_event_delete(&p.event)
-	}
-	yaml_parser_delete(&p.parser)
-}
-
-// expect consumes an event from the event stream and
-// checks that it's of the expected type.
-func (p *parser) expect(e yaml_event_type_t) {
-	if p.event.typ == yaml_NO_EVENT {
-		if !yaml_parser_parse(&p.parser, &p.event) {
-			p.fail()
-		}
-	}
-	if p.event.typ == yaml_STREAM_END_EVENT {
-		failf("attempted to go past the end of stream; corrupted value?")
-	}
-	if p.event.typ != e {
-		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
-		p.fail()
-	}
-	yaml_event_delete(&p.event)
-	p.event.typ = yaml_NO_EVENT
-}
-
-// peek peeks at the next event in the event stream,
-// puts the results into p.event and returns the event type.
-func (p *parser) peek() yaml_event_type_t {
-	if p.event.typ != yaml_NO_EVENT {
-		return p.event.typ
-	}
-	if !yaml_parser_parse(&p.parser, &p.event) {
-		p.fail()
-	}
-	return p.event.typ
-}
-
-func (p *parser) fail() {
-	var where string
-	var line int
-	if p.parser.problem_mark.line != 0 {
-		line = p.parser.problem_mark.line
-		// Scanner errors don't iterate line before returning error
-		if p.parser.error == yaml_SCANNER_ERROR {
-			line++
-		}
-	} else if p.parser.context_mark.line != 0 {
-		line = p.parser.context_mark.line
-	}
-	if line != 0 {
-		where = "line " + strconv.Itoa(line) + ": "
-	}
-	var msg string
-	if len(p.parser.problem) > 0 {
-		msg = p.parser.problem
-	} else {
-		msg = "unknown problem parsing YAML content"
-	}
-	failf("%s%s", where, msg)
-}
-
-func (p *parser) anchor(n *node, anchor []byte) {
-	if anchor != nil {
-		p.doc.anchors[string(anchor)] = n
-	}
-}
-
-func (p *parser) parse() *node {
-	p.init()
-	switch p.peek() {
-	case yaml_SCALAR_EVENT:
-		return p.scalar()
-	case yaml_ALIAS_EVENT:
-		return p.alias()
-	case yaml_MAPPING_START_EVENT:
-		return p.mapping()
-	case yaml_SEQUENCE_START_EVENT:
-		return p.sequence()
-	case yaml_DOCUMENT_START_EVENT:
-		return p.document()
-	case yaml_STREAM_END_EVENT:
-		// Happens when attempting to decode an empty buffer.
-		return nil
-	default:
-		panic("attempted to parse unknown event: " + p.event.typ.String())
-	}
-}
-
-func (p *parser) node(kind int) *node {
-	return &node{
-		kind:   kind,
-		line:   p.event.start_mark.line,
-		column: p.event.start_mark.column,
-	}
-}
-
-func (p *parser) document() *node {
-	n := p.node(documentNode)
-	n.anchors = make(map[string]*node)
-	p.doc = n
-	p.expect(yaml_DOCUMENT_START_EVENT)
-	n.children = append(n.children, p.parse())
-	p.expect(yaml_DOCUMENT_END_EVENT)
-	return n
-}
-
-func (p *parser) alias() *node {
-	n := p.node(aliasNode)
-	n.value = string(p.event.anchor)
-	n.alias = p.doc.anchors[n.value]
-	if n.alias == nil {
-		failf("unknown anchor '%s' referenced", n.value)
-	}
-	p.expect(yaml_ALIAS_EVENT)
-	return n
-}
-
-func (p *parser) scalar() *node {
-	n := p.node(scalarNode)
-	n.value = string(p.event.value)
-	n.tag = string(p.event.tag)
-	n.implicit = p.event.implicit
-	p.anchor(n, p.event.anchor)
-	p.expect(yaml_SCALAR_EVENT)
-	return n
-}
-
-func (p *parser) sequence() *node {
-	n := p.node(sequenceNode)
-	p.anchor(n, p.event.anchor)
-	p.expect(yaml_SEQUENCE_START_EVENT)
-	for p.peek() != yaml_SEQUENCE_END_EVENT {
-		n.children = append(n.children, p.parse())
-	}
-	p.expect(yaml_SEQUENCE_END_EVENT)
-	return n
-}
-
-func (p *parser) mapping() *node {
-	n := p.node(mappingNode)
-	p.anchor(n, p.event.anchor)
-	p.expect(yaml_MAPPING_START_EVENT)
-	for p.peek() != yaml_MAPPING_END_EVENT {
-		n.children = append(n.children, p.parse(), p.parse())
-	}
-	p.expect(yaml_MAPPING_END_EVENT)
-	return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
-
-type decoder struct {
-	doc     *node
-	aliases map[*node]bool
-	mapType reflect.Type
-	terrors []string
-	strict  bool
-
-	decodeCount int
-	aliasCount  int
-	aliasDepth  int
-}
-
-var (
-	mapItemType    = reflect.TypeOf(MapItem{})
-	durationType   = reflect.TypeOf(time.Duration(0))
-	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
-	ifaceType      = defaultMapType.Elem()
-	timeType       = reflect.TypeOf(time.Time{})
-	ptrTimeType    = reflect.TypeOf(&time.Time{})
-)
-
-func newDecoder(strict bool) *decoder {
-	d := &decoder{mapType: defaultMapType, strict: strict}
-	d.aliases = make(map[*node]bool)
-	return d
-}
-
-func (d *decoder) terror(n *node, tag string, out reflect.Value) {
-	if n.tag != "" {
-		tag = n.tag
-	}
-	value := n.value
-	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
-		if len(value) > 10 {
-			value = " `" + value[:7] + "...`"
-		} else {
-			value = " `" + value + "`"
-		}
-	}
-	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
-}
-
-func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
-	terrlen := len(d.terrors)
-	err := u.UnmarshalYAML(func(v interface{}) (err error) {
-		defer handleErr(&err)
-		d.unmarshal(n, reflect.ValueOf(v))
-		if len(d.terrors) > terrlen {
-			issues := d.terrors[terrlen:]
-			d.terrors = d.terrors[:terrlen]
-			return &TypeError{issues}
-		}
-		return nil
-	})
-	if e, ok := err.(*TypeError); ok {
-		d.terrors = append(d.terrors, e.Errors...)
-		return false
-	}
-	if err != nil {
-		fail(err)
-	}
-	return true
-}
-
-// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
-// if a value is found to implement it.
-// It returns the initialized and dereferenced out value, whether
-// unmarshalling was already done by UnmarshalYAML, and if so whether
-// its types unmarshalled appropriately.
-//
-// If n holds a null value, prepare returns before doing anything.
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
-	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
-		return out, false, false
-	}
-	again := true
-	for again {
-		again = false
-		if out.Kind() == reflect.Ptr {
-			if out.IsNil() {
-				out.Set(reflect.New(out.Type().Elem()))
-			}
-			out = out.Elem()
-			again = true
-		}
-		if out.CanAddr() {
-			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
-				good = d.callUnmarshaler(n, u)
-				return out, true, good
-			}
-		}
-	}
-	return out, false, false
-}
-
-func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
-	d.decodeCount++
-	if d.aliasDepth > 0 {
-		d.aliasCount++
-	}
-	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > 0.99 {
-		failf("document contains excessive aliasing")
-	}
-	switch n.kind {
-	case documentNode:
-		return d.document(n, out)
-	case aliasNode:
-		return d.alias(n, out)
-	}
-	out, unmarshaled, good := d.prepare(n, out)
-	if unmarshaled {
-		return good
-	}
-	switch n.kind {
-	case scalarNode:
-		good = d.scalar(n, out)
-	case mappingNode:
-		good = d.mapping(n, out)
-	case sequenceNode:
-		good = d.sequence(n, out)
-	default:
-		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
-	}
-	return good
-}
-
-func (d *decoder) document(n *node, out reflect.Value) (good bool) {
-	if len(n.children) == 1 {
-		d.doc = n
-		d.unmarshal(n.children[0], out)
-		return true
-	}
-	return false
-}
-
-func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
-	if d.aliases[n] {
-		// TODO this could actually be allowed in some circumstances.
-		failf("anchor '%s' value contains itself", n.value)
-	}
-	d.aliases[n] = true
-	d.aliasDepth++
-	good = d.unmarshal(n.alias, out)
-	d.aliasDepth--
-	delete(d.aliases, n)
-	return good
-}
-
-var zeroValue reflect.Value
-
-func resetMap(out reflect.Value) {
-	for _, k := range out.MapKeys() {
-		out.SetMapIndex(k, zeroValue)
-	}
-}
-
-func (d *decoder) scalar(n *node, out reflect.Value) bool {
-	var tag string
-	var resolved interface{}
-	if n.tag == "" && !n.implicit {
-		tag = yaml_STR_TAG
-		resolved = n.value
-	} else {
-		tag, resolved = resolve(n.tag, n.value)
-		if tag == yaml_BINARY_TAG {
-			data, err := base64.StdEncoding.DecodeString(resolved.(string))
-			if err != nil {
-				failf("!!binary value contains invalid base64 data")
-			}
-			resolved = string(data)
-		}
-	}
-	if resolved == nil {
-		if out.Kind() == reflect.Map && !out.CanAddr() {
-			resetMap(out)
-		} else {
-			out.Set(reflect.Zero(out.Type()))
-		}
-		return true
-	}
-	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
-		// We've resolved to exactly the type we want, so use that.
-		out.Set(resolvedv)
-		return true
-	}
-	// Perhaps we can use the value as a TextUnmarshaler to
-	// set its value.
-	if out.CanAddr() {
-		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
-		if ok {
-			var text []byte
-			if tag == yaml_BINARY_TAG {
-				text = []byte(resolved.(string))
-			} else {
-				// We let any value be unmarshaled into TextUnmarshaler.
-				// That might be more lax than we'd like, but the
-				// TextUnmarshaler itself should bowl out any dubious values.
-				text = []byte(n.value)
-			}
-			err := u.UnmarshalText(text)
-			if err != nil {
-				fail(err)
-			}
-			return true
-		}
-	}
-	switch out.Kind() {
-	case reflect.String:
-		if tag == yaml_BINARY_TAG {
-			out.SetString(resolved.(string))
-			return true
-		}
-		if resolved != nil {
-			out.SetString(n.value)
-			return true
-		}
-	case reflect.Interface:
-		if resolved == nil {
-			out.Set(reflect.Zero(out.Type()))
-		} else if tag == yaml_TIMESTAMP_TAG {
-			// It looks like a timestamp but for backward compatibility
-			// reasons we set it as a string, so that code that unmarshals
-			// timestamp-like values into interface{} will continue to
-			// see a string and not a time.Time.
-			// TODO(v3) Drop this.
-			out.Set(reflect.ValueOf(n.value))
-		} else {
-			out.Set(reflect.ValueOf(resolved))
-		}
-		return true
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		switch resolved := resolved.(type) {
-		case int:
-			if !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				return true
-			}
-		case int64:
-			if !out.OverflowInt(resolved) {
-				out.SetInt(resolved)
-				return true
-			}
-		case uint64:
-			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				return true
-			}
-		case float64:
-			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
-				out.SetInt(int64(resolved))
-				return true
-			}
-		case string:
-			if out.Type() == durationType {
-				d, err := time.ParseDuration(resolved)
-				if err == nil {
-					out.SetInt(int64(d))
-					return true
-				}
-			}
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		switch resolved := resolved.(type) {
-		case int:
-			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		case int64:
-			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		case uint64:
-			if !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		case float64:
-			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
-				out.SetUint(uint64(resolved))
-				return true
-			}
-		}
-	case reflect.Bool:
-		switch resolved := resolved.(type) {
-		case bool:
-			out.SetBool(resolved)
-			return true
-		}
-	case reflect.Float32, reflect.Float64:
-		switch resolved := resolved.(type) {
-		case int:
-			out.SetFloat(float64(resolved))
-			return true
-		case int64:
-			out.SetFloat(float64(resolved))
-			return true
-		case uint64:
-			out.SetFloat(float64(resolved))
-			return true
-		case float64:
-			out.SetFloat(resolved)
-			return true
-		}
-	case reflect.Struct:
-		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
-			out.Set(resolvedv)
-			return true
-		}
-	case reflect.Ptr:
-		if out.Type().Elem() == reflect.TypeOf(resolved) {
-			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
-			elem := reflect.New(out.Type().Elem())
-			elem.Elem().Set(reflect.ValueOf(resolved))
-			out.Set(elem)
-			return true
-		}
-	}
-	d.terror(n, tag, out)
-	return false
-}
-
-func settableValueOf(i interface{}) reflect.Value {
-	v := reflect.ValueOf(i)
-	sv := reflect.New(v.Type()).Elem()
-	sv.Set(v)
-	return sv
-}
-
-func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
-	l := len(n.children)
-
-	var iface reflect.Value
-	switch out.Kind() {
-	case reflect.Slice:
-		out.Set(reflect.MakeSlice(out.Type(), l, l))
-	case reflect.Array:
-		if l != out.Len() {
-			failf("invalid array: want %d elements but got %d", out.Len(), l)
-		}
-	case reflect.Interface:
-		// No type hints. Will have to use a generic sequence.
-		iface = out
-		out = settableValueOf(make([]interface{}, l))
-	default:
-		d.terror(n, yaml_SEQ_TAG, out)
-		return false
-	}
-	et := out.Type().Elem()
-
-	j := 0
-	for i := 0; i < l; i++ {
-		e := reflect.New(et).Elem()
-		if ok := d.unmarshal(n.children[i], e); ok {
-			out.Index(j).Set(e)
-			j++
-		}
-	}
-	if out.Kind() != reflect.Array {
-		out.Set(out.Slice(0, j))
-	}
-	if iface.IsValid() {
-		iface.Set(out)
-	}
-	return true
-}
-
-func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
-	switch out.Kind() {
-	case reflect.Struct:
-		return d.mappingStruct(n, out)
-	case reflect.Slice:
-		return d.mappingSlice(n, out)
-	case reflect.Map:
-		// okay
-	case reflect.Interface:
-		if d.mapType.Kind() == reflect.Map {
-			iface := out
-			out = reflect.MakeMap(d.mapType)
-			iface.Set(out)
-		} else {
-			slicev := reflect.New(d.mapType).Elem()
-			if !d.mappingSlice(n, slicev) {
-				return false
-			}
-			out.Set(slicev)
-			return true
-		}
-	default:
-		d.terror(n, yaml_MAP_TAG, out)
-		return false
-	}
-	outt := out.Type()
-	kt := outt.Key()
-	et := outt.Elem()
-
-	mapType := d.mapType
-	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
-		d.mapType = outt
-	}
-
-	if out.IsNil() {
-		out.Set(reflect.MakeMap(outt))
-	}
-	l := len(n.children)
-	for i := 0; i < l; i += 2 {
-		if isMerge(n.children[i]) {
-			d.merge(n.children[i+1], out)
-			continue
-		}
-		k := reflect.New(kt).Elem()
-		if d.unmarshal(n.children[i], k) {
-			kkind := k.Kind()
-			if kkind == reflect.Interface {
-				kkind = k.Elem().Kind()
-			}
-			if kkind == reflect.Map || kkind == reflect.Slice {
-				failf("invalid map key: %#v", k.Interface())
-			}
-			e := reflect.New(et).Elem()
-			if d.unmarshal(n.children[i+1], e) {
-				d.setMapIndex(n.children[i+1], out, k, e)
-			}
-		}
-	}
-	d.mapType = mapType
-	return true
-}
-
-func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
-	if d.strict && out.MapIndex(k) != zeroValue {
-		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
-		return
-	}
-	out.SetMapIndex(k, v)
-}
-
-func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
-	outt := out.Type()
-	if outt.Elem() != mapItemType {
-		d.terror(n, yaml_MAP_TAG, out)
-		return false
-	}
-
-	mapType := d.mapType
-	d.mapType = outt
-
-	var slice []MapItem
-	var l = len(n.children)
-	for i := 0; i < l; i += 2 {
-		if isMerge(n.children[i]) {
-			d.merge(n.children[i+1], out)
-			continue
-		}
-		item := MapItem{}
-		k := reflect.ValueOf(&item.Key).Elem()
-		if d.unmarshal(n.children[i], k) {
-			v := reflect.ValueOf(&item.Value).Elem()
-			if d.unmarshal(n.children[i+1], v) {
-				slice = append(slice, item)
-			}
-		}
-	}
-	out.Set(reflect.ValueOf(slice))
-	d.mapType = mapType
-	return true
-}
-
-func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
-	sinfo, err := getStructInfo(out.Type())
-	if err != nil {
-		panic(err)
-	}
-	name := settableValueOf("")
-	l := len(n.children)
-
-	var inlineMap reflect.Value
-	var elemType reflect.Type
-	if sinfo.InlineMap != -1 {
-		inlineMap = out.Field(sinfo.InlineMap)
-		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
-		elemType = inlineMap.Type().Elem()
-	}
-
-	var doneFields []bool
-	if d.strict {
-		doneFields = make([]bool, len(sinfo.FieldsList))
-	}
-	for i := 0; i < l; i += 2 {
-		ni := n.children[i]
-		if isMerge(ni) {
-			d.merge(n.children[i+1], out)
-			continue
-		}
-		if !d.unmarshal(ni, name) {
-			continue
-		}
-		if info, ok := sinfo.FieldsMap[name.String()]; ok {
-			if d.strict {
-				if doneFields[info.Id] {
-					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
-					continue
-				}
-				doneFields[info.Id] = true
-			}
-			var field reflect.Value
-			if info.Inline == nil {
-				field = out.Field(info.Num)
-			} else {
-				field = out.FieldByIndex(info.Inline)
-			}
-			d.unmarshal(n.children[i+1], field)
-		} else if sinfo.InlineMap != -1 {
-			if inlineMap.IsNil() {
-				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
-			}
-			value := reflect.New(elemType).Elem()
-			d.unmarshal(n.children[i+1], value)
-			d.setMapIndex(n.children[i+1], inlineMap, name, value)
-		} else if d.strict {
-			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
-		}
-	}
-	return true
-}
-
-func failWantMap() {
-	failf("map merge requires map or sequence of maps as the value")
-}
-
-func (d *decoder) merge(n *node, out reflect.Value) {
-	switch n.kind {
-	case mappingNode:
-		d.unmarshal(n, out)
-	case aliasNode:
-		an, ok := d.doc.anchors[n.value]
-		if ok && an.kind != mappingNode {
-			failWantMap()
-		}
-		d.unmarshal(n, out)
-	case sequenceNode:
-		// Step backwards as earlier nodes take precedence.
-		for i := len(n.children) - 1; i >= 0; i-- {
-			ni := n.children[i]
-			if ni.kind == aliasNode {
-				an, ok := d.doc.anchors[ni.value]
-				if ok && an.kind != mappingNode {
-					failWantMap()
-				}
-			} else if ni.kind != mappingNode {
-				failWantMap()
-			}
-			d.unmarshal(ni, out)
-		}
-	default:
-		failWantMap()
-	}
-}
-
-func isMerge(n *node) bool {
-	return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
-}
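The decode.go deleted above implements the node tree and decoder behind the package-level Unmarshal entry point. A minimal, illustrative sketch of that usage, assuming invented struct fields and values (the basic Unmarshal call is the same in the yaml.v3 module that replaces this vendored copy):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Config is an invented example type; the decoder above maps YAML keys
// onto these fields through their `yaml` tags.
type Config struct {
	Name    string   `yaml:"name"`
	Port    int      `yaml:"port"`
	Servers []string `yaml:"servers"`
}

func main() {
	in := []byte("name: ofagent\nport: 50051\nservers:\n  - core-1\n  - core-2\n")

	var cfg Config
	if err := yaml.Unmarshal(in, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:ofagent Port:50051 Servers:[core-1 core-2]}
}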
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go
deleted file mode 100644
index 0ee738e..0000000
--- a/vendor/gopkg.in/yaml.v2/encode.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package yaml
-
-import (
-	"encoding"
-	"fmt"
-	"io"
-	"reflect"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-	"unicode/utf8"
-)
-
-// jsonNumber is the interface of the encoding/json.Number datatype.
-// Repeating the interface here avoids a dependency on encoding/json, and also
-// supports other libraries like jsoniter, which use a similar datatype with
-// the same interface. Detecting this interface is useful when dealing with
-// structures containing json.Number, which is a string under the hood. The
-// encoder should prefer the use of Int64(), Float64() and string(), in that
-// order, when encoding this type.
-type jsonNumber interface {
-	Float64() (float64, error)
-	Int64() (int64, error)
-	String() string
-}
-
-type encoder struct {
-	emitter yaml_emitter_t
-	event   yaml_event_t
-	out     []byte
-	flow    bool
-	// doneInit holds whether the initial stream_start_event has been
-	// emitted.
-	doneInit bool
-}
-
-func newEncoder() *encoder {
-	e := &encoder{}
-	yaml_emitter_initialize(&e.emitter)
-	yaml_emitter_set_output_string(&e.emitter, &e.out)
-	yaml_emitter_set_unicode(&e.emitter, true)
-	return e
-}
-
-func newEncoderWithWriter(w io.Writer) *encoder {
-	e := &encoder{}
-	yaml_emitter_initialize(&e.emitter)
-	yaml_emitter_set_output_writer(&e.emitter, w)
-	yaml_emitter_set_unicode(&e.emitter, true)
-	return e
-}
-
-func (e *encoder) init() {
-	if e.doneInit {
-		return
-	}
-	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
-	e.emit()
-	e.doneInit = true
-}
-
-func (e *encoder) finish() {
-	e.emitter.open_ended = false
-	yaml_stream_end_event_initialize(&e.event)
-	e.emit()
-}
-
-func (e *encoder) destroy() {
-	yaml_emitter_delete(&e.emitter)
-}
-
-func (e *encoder) emit() {
-	// This will internally delete the e.event value.
-	e.must(yaml_emitter_emit(&e.emitter, &e.event))
-}
-
-func (e *encoder) must(ok bool) {
-	if !ok {
-		msg := e.emitter.problem
-		if msg == "" {
-			msg = "unknown problem generating YAML content"
-		}
-		failf("%s", msg)
-	}
-}
-
-func (e *encoder) marshalDoc(tag string, in reflect.Value) {
-	e.init()
-	yaml_document_start_event_initialize(&e.event, nil, nil, true)
-	e.emit()
-	e.marshal(tag, in)
-	yaml_document_end_event_initialize(&e.event, true)
-	e.emit()
-}
-
-func (e *encoder) marshal(tag string, in reflect.Value) {
-	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
-		e.nilv()
-		return
-	}
-	iface := in.Interface()
-	switch m := iface.(type) {
-	case jsonNumber:
-		integer, err := m.Int64()
-		if err == nil {
-			// In this case the json.Number is a valid int64
-			in = reflect.ValueOf(integer)
-			break
-		}
-		float, err := m.Float64()
-		if err == nil {
-			// In this case the json.Number is a valid float64
-			in = reflect.ValueOf(float)
-			break
-		}
-		// fallback case - no number could be obtained
-		in = reflect.ValueOf(m.String())
-	case time.Time, *time.Time:
-		// Although time.Time implements TextMarshaler,
-		// we don't want to treat it as a string for YAML
-		// purposes because YAML has special support for
-		// timestamps.
-	case Marshaler:
-		v, err := m.MarshalYAML()
-		if err != nil {
-			fail(err)
-		}
-		if v == nil {
-			e.nilv()
-			return
-		}
-		in = reflect.ValueOf(v)
-	case encoding.TextMarshaler:
-		text, err := m.MarshalText()
-		if err != nil {
-			fail(err)
-		}
-		in = reflect.ValueOf(string(text))
-	case nil:
-		e.nilv()
-		return
-	}
-	switch in.Kind() {
-	case reflect.Interface:
-		e.marshal(tag, in.Elem())
-	case reflect.Map:
-		e.mapv(tag, in)
-	case reflect.Ptr:
-		if in.Type() == ptrTimeType {
-			e.timev(tag, in.Elem())
-		} else {
-			e.marshal(tag, in.Elem())
-		}
-	case reflect.Struct:
-		if in.Type() == timeType {
-			e.timev(tag, in)
-		} else {
-			e.structv(tag, in)
-		}
-	case reflect.Slice, reflect.Array:
-		if in.Type().Elem() == mapItemType {
-			e.itemsv(tag, in)
-		} else {
-			e.slicev(tag, in)
-		}
-	case reflect.String:
-		e.stringv(tag, in)
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		if in.Type() == durationType {
-			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
-		} else {
-			e.intv(tag, in)
-		}
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		e.uintv(tag, in)
-	case reflect.Float32, reflect.Float64:
-		e.floatv(tag, in)
-	case reflect.Bool:
-		e.boolv(tag, in)
-	default:
-		panic("cannot marshal type: " + in.Type().String())
-	}
-}
-
-func (e *encoder) mapv(tag string, in reflect.Value) {
-	e.mappingv(tag, func() {
-		keys := keyList(in.MapKeys())
-		sort.Sort(keys)
-		for _, k := range keys {
-			e.marshal("", k)
-			e.marshal("", in.MapIndex(k))
-		}
-	})
-}
-
-func (e *encoder) itemsv(tag string, in reflect.Value) {
-	e.mappingv(tag, func() {
-		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
-		for _, item := range slice {
-			e.marshal("", reflect.ValueOf(item.Key))
-			e.marshal("", reflect.ValueOf(item.Value))
-		}
-	})
-}
-
-func (e *encoder) structv(tag string, in reflect.Value) {
-	sinfo, err := getStructInfo(in.Type())
-	if err != nil {
-		panic(err)
-	}
-	e.mappingv(tag, func() {
-		for _, info := range sinfo.FieldsList {
-			var value reflect.Value
-			if info.Inline == nil {
-				value = in.Field(info.Num)
-			} else {
-				value = in.FieldByIndex(info.Inline)
-			}
-			if info.OmitEmpty && isZero(value) {
-				continue
-			}
-			e.marshal("", reflect.ValueOf(info.Key))
-			e.flow = info.Flow
-			e.marshal("", value)
-		}
-		if sinfo.InlineMap >= 0 {
-			m := in.Field(sinfo.InlineMap)
-			if m.Len() > 0 {
-				e.flow = false
-				keys := keyList(m.MapKeys())
-				sort.Sort(keys)
-				for _, k := range keys {
-					if _, found := sinfo.FieldsMap[k.String()]; found {
-						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
-					}
-					e.marshal("", k)
-					e.flow = false
-					e.marshal("", m.MapIndex(k))
-				}
-			}
-		}
-	})
-}
-
-func (e *encoder) mappingv(tag string, f func()) {
-	implicit := tag == ""
-	style := yaml_BLOCK_MAPPING_STYLE
-	if e.flow {
-		e.flow = false
-		style = yaml_FLOW_MAPPING_STYLE
-	}
-	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
-	e.emit()
-	f()
-	yaml_mapping_end_event_initialize(&e.event)
-	e.emit()
-}
-
-func (e *encoder) slicev(tag string, in reflect.Value) {
-	implicit := tag == ""
-	style := yaml_BLOCK_SEQUENCE_STYLE
-	if e.flow {
-		e.flow = false
-		style = yaml_FLOW_SEQUENCE_STYLE
-	}
-	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
-	e.emit()
-	n := in.Len()
-	for i := 0; i < n; i++ {
-		e.marshal("", in.Index(i))
-	}
-	e.must(yaml_sequence_end_event_initialize(&e.event))
-	e.emit()
-}
-
-// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
-//
-// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
-// in YAML 1.2 and by this package, but these should be marshalled quoted for
-// the time being for compatibility with other parsers.
-func isBase60Float(s string) (result bool) {
-	// Fast path.
-	if s == "" {
-		return false
-	}
-	c := s[0]
-	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
-		return false
-	}
-	// Do the full match.
-	return base60float.MatchString(s)
-}
-
-// From http://yaml.org/type/float.html, except the regular expression there
-// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
-var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
-
-func (e *encoder) stringv(tag string, in reflect.Value) {
-	var style yaml_scalar_style_t
-	s := in.String()
-	canUsePlain := true
-	switch {
-	case !utf8.ValidString(s):
-		if tag == yaml_BINARY_TAG {
-			failf("explicitly tagged !!binary data must be base64-encoded")
-		}
-		if tag != "" {
-			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
-		}
-		// It can't be encoded directly as YAML so use a binary tag
-		// and encode it as base64.
-		tag = yaml_BINARY_TAG
-		s = encodeBase64(s)
-	case tag == "":
-		// Check to see if it would resolve to a specific
-		// tag when encoded unquoted. If it doesn't,
-		// there's no need to quote it.
-		rtag, _ := resolve("", s)
-		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
-	}
-	// Note: it's possible for user code to emit invalid YAML
-	// if they explicitly specify a tag and a string containing
-	// text that's incompatible with that tag.
-	switch {
-	case strings.Contains(s, "\n"):
-		style = yaml_LITERAL_SCALAR_STYLE
-	case canUsePlain:
-		style = yaml_PLAIN_SCALAR_STYLE
-	default:
-		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
-	}
-	e.emitScalar(s, "", tag, style)
-}
-
-func (e *encoder) boolv(tag string, in reflect.Value) {
-	var s string
-	if in.Bool() {
-		s = "true"
-	} else {
-		s = "false"
-	}
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) intv(tag string, in reflect.Value) {
-	s := strconv.FormatInt(in.Int(), 10)
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) uintv(tag string, in reflect.Value) {
-	s := strconv.FormatUint(in.Uint(), 10)
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) timev(tag string, in reflect.Value) {
-	t := in.Interface().(time.Time)
-	s := t.Format(time.RFC3339Nano)
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) floatv(tag string, in reflect.Value) {
-	// Issue #352: When formatting, use the precision of the underlying value
-	precision := 64
-	if in.Kind() == reflect.Float32 {
-		precision = 32
-	}
-
-	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
-	switch s {
-	case "+Inf":
-		s = ".inf"
-	case "-Inf":
-		s = "-.inf"
-	case "NaN":
-		s = ".nan"
-	}
-	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) nilv() {
-	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
-	implicit := tag == ""
-	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
-	e.emit()
-}
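The deleted encode.go is the counterpart driven by yaml.Marshal. A small hedged example of the tag handling it implements (omitempty and flow), using made-up types and values:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Endpoint is illustrative only; "flow" renders the slice inline and
// "omitempty" drops zero-valued fields, as handled by structv above.
type Endpoint struct {
	Address string   `yaml:"address"`
	Port    int      `yaml:"port,omitempty"`
	Tags    []string `yaml:"tags,flow,omitempty"`
}

func main() {
	out, err := yaml.Marshal(Endpoint{
		Address: "voltha-core",
		Tags:    []string{"grpc", "internal"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// address: voltha-core
	// tags: [grpc, internal]
}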
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go
deleted file mode 100644
index a2dde60..0000000
--- a/vendor/gopkg.in/yaml.v2/writerc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package yaml
-
-// Set the writer error and return false.
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
-	emitter.error = yaml_WRITER_ERROR
-	emitter.problem = problem
-	return false
-}
-
-// Flush the output buffer.
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
-	if emitter.write_handler == nil {
-		panic("write handler not set")
-	}
-
-	// Check if the buffer is empty.
-	if emitter.buffer_pos == 0 {
-		return true
-	}
-
-	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
-		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
-	}
-	emitter.buffer_pos = 0
-	return true
-}
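yaml_emitter_flush above is where buffered output finally reaches the io.Writer handed to the package. A brief illustrative round trip through that path using a bytes.Buffer (map contents are arbitrary):

package main

import (
	"bytes"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer

	// NewEncoder installs a write handler on the emitter; Close ends the
	// stream, which flushes any remaining buffered bytes into buf.
	enc := yaml.NewEncoder(&buf)
	if err := enc.Encode(map[string]string{"component": "ofagent"}); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	fmt.Print(buf.String()) // component: ofagent
}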
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
deleted file mode 100644
index de85aa4..0000000
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ /dev/null
@@ -1,466 +0,0 @@
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-//   https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"reflect"
-	"strings"
-	"sync"
-)
-
-// MapSlice encodes and decodes as a YAML map.
-// The order of keys is preserved when encoding and decoding.
-type MapSlice []MapItem
-
-// MapItem is an item in a MapSlice.
-type MapItem struct {
-	Key, Value interface{}
-}
-
-// The Unmarshaler interface may be implemented by types to customize their
-// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
-// method receives a function that may be called to unmarshal the original
-// YAML value into a field or variable. It is safe to call the unmarshal
-// function parameter more than once if necessary.
-type Unmarshaler interface {
-	UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
-	MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to a type
-// mismatch, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     var t T
-//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
-	return unmarshal(in, out, false)
-}
-
-// UnmarshalStrict is like Unmarshal except that any fields that are found
-// in the data that do not have corresponding struct members, or mapping
-// keys that are duplicates, will result in
-// an error.
-func UnmarshalStrict(in []byte, out interface{}) (err error) {
-	return unmarshal(in, out, true)
-}
-
-// A Decoder reads and decodes YAML values from an input stream.
-type Decoder struct {
-	strict bool
-	parser *parser
-}
-
-// NewDecoder returns a new decoder that reads from r.
-//
-// The decoder introduces its own buffering and may read
-// data from r beyond the YAML values requested.
-func NewDecoder(r io.Reader) *Decoder {
-	return &Decoder{
-		parser: newParserFromReader(r),
-	}
-}
-
-// SetStrict sets whether strict decoding behaviour is enabled when
-// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
-func (dec *Decoder) SetStrict(strict bool) {
-	dec.strict = strict
-}
-
-// Decode reads the next YAML-encoded value from its input
-// and stores it in the value pointed to by v.
-//
-// See the documentation for Unmarshal for details about the
-// conversion of YAML into a Go value.
-func (dec *Decoder) Decode(v interface{}) (err error) {
-	d := newDecoder(dec.strict)
-	defer handleErr(&err)
-	node := dec.parser.parse()
-	if node == nil {
-		return io.EOF
-	}
-	out := reflect.ValueOf(v)
-	if out.Kind() == reflect.Ptr && !out.IsNil() {
-		out = out.Elem()
-	}
-	d.unmarshal(node, out)
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
-
-func unmarshal(in []byte, out interface{}, strict bool) (err error) {
-	defer handleErr(&err)
-	d := newDecoder(strict)
-	p := newParser(in)
-	defer p.destroy()
-	node := p.parse()
-	if node != nil {
-		v := reflect.ValueOf(out)
-		if v.Kind() == reflect.Ptr && !v.IsNil() {
-			v = v.Elem()
-		}
-		d.unmarshal(node, v)
-	}
-	if len(d.terrors) > 0 {
-		return &TypeError{d.terrors}
-	}
-	return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only marshalled if they are exported (have an upper case
-// first letter), and are marshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
-//
-// The following flags are currently supported:
-//
-//     omitempty    Only include the field if it's not set to the zero
-//                  value for the type or to empty slices or maps.
-//                  Zero valued structs will be omitted if all their public
-//                  fields are zero, unless they implement an IsZero
-//                  method (see the IsZeroer interface type), in which
-//                  case the field will be included if that method returns true.
-//
-//     flow         Marshal using a flow style (useful for structs,
-//                  sequences and maps).
-//
-//     inline       Inline the field, which must be a struct or a map,
-//                  causing all of its fields or keys to be processed as if
-//                  they were part of the outer struct. For maps, keys must
-//                  not conflict with the yaml keys of other struct fields.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-//     type T struct {
-//         F int `yaml:"a,omitempty"`
-//         B int
-//     }
-//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
-	defer handleErr(&err)
-	e := newEncoder()
-	defer e.destroy()
-	e.marshalDoc("", reflect.ValueOf(in))
-	e.finish()
-	out = e.out
-	return
-}
-
-// An Encoder writes YAML values to an output stream.
-type Encoder struct {
-	encoder *encoder
-}
-
-// NewEncoder returns a new encoder that writes to w.
-// The Encoder should be closed after use to flush all data
-// to w.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
-		encoder: newEncoderWithWriter(w),
-	}
-}
-
-// Encode writes the YAML encoding of v to the stream.
-// If multiple items are encoded to the stream, the
-// second and subsequent document will be preceded
-// with a "---" document separator, but the first will not.
-//
-// See the documentation for Marshal for details about the conversion of Go
-// values to YAML.
-func (e *Encoder) Encode(v interface{}) (err error) {
-	defer handleErr(&err)
-	e.encoder.marshalDoc("", reflect.ValueOf(v))
-	return nil
-}
-
-// Close closes the encoder by writing any remaining data.
-// It does not write a stream terminating string "...".
-func (e *Encoder) Close() (err error) {
-	defer handleErr(&err)
-	e.encoder.finish()
-	return nil
-}
-
-func handleErr(err *error) {
-	if v := recover(); v != nil {
-		if e, ok := v.(yamlError); ok {
-			*err = e.err
-		} else {
-			panic(v)
-		}
-	}
-}
-
-type yamlError struct {
-	err error
-}
-
-func fail(err error) {
-	panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
-	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
-	Errors []string
-}
-
-func (e *TypeError) Error() string {
-	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
-	FieldsMap  map[string]fieldInfo
-	FieldsList []fieldInfo
-
-	// InlineMap is the number of the field in the struct that
-	// contains an ,inline map, or -1 if there's none.
-	InlineMap int
-}
-
-type fieldInfo struct {
-	Key       string
-	Num       int
-	OmitEmpty bool
-	Flow      bool
-	// Id holds the unique field identifier, so we can cheaply
-	// check for field duplicates without maintaining an extra map.
-	Id int
-
-	// Inline holds the field index if the field is part of an inlined struct.
-	Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
-	fieldMapMutex.RLock()
-	sinfo, found := structMap[st]
-	fieldMapMutex.RUnlock()
-	if found {
-		return sinfo, nil
-	}
-
-	n := st.NumField()
-	fieldsMap := make(map[string]fieldInfo)
-	fieldsList := make([]fieldInfo, 0, n)
-	inlineMap := -1
-	for i := 0; i != n; i++ {
-		field := st.Field(i)
-		if field.PkgPath != "" && !field.Anonymous {
-			continue // Private field
-		}
-
-		info := fieldInfo{Num: i}
-
-		tag := field.Tag.Get("yaml")
-		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
-			tag = string(field.Tag)
-		}
-		if tag == "-" {
-			continue
-		}
-
-		inline := false
-		fields := strings.Split(tag, ",")
-		if len(fields) > 1 {
-			for _, flag := range fields[1:] {
-				switch flag {
-				case "omitempty":
-					info.OmitEmpty = true
-				case "flow":
-					info.Flow = true
-				case "inline":
-					inline = true
-				default:
-					return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
-				}
-			}
-			tag = fields[0]
-		}
-
-		if inline {
-			switch field.Type.Kind() {
-			case reflect.Map:
-				if inlineMap >= 0 {
-					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
-				}
-				if field.Type.Key() != reflect.TypeOf("") {
-					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
-				}
-				inlineMap = info.Num
-			case reflect.Struct:
-				sinfo, err := getStructInfo(field.Type)
-				if err != nil {
-					return nil, err
-				}
-				for _, finfo := range sinfo.FieldsList {
-					if _, found := fieldsMap[finfo.Key]; found {
-						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
-						return nil, errors.New(msg)
-					}
-					if finfo.Inline == nil {
-						finfo.Inline = []int{i, finfo.Num}
-					} else {
-						finfo.Inline = append([]int{i}, finfo.Inline...)
-					}
-					finfo.Id = len(fieldsList)
-					fieldsMap[finfo.Key] = finfo
-					fieldsList = append(fieldsList, finfo)
-				}
-			default:
-				//return nil, errors.New("Option ,inline needs a struct value or map field")
-				return nil, errors.New("Option ,inline needs a struct value field")
-			}
-			continue
-		}
-
-		if tag != "" {
-			info.Key = tag
-		} else {
-			info.Key = strings.ToLower(field.Name)
-		}
-
-		if _, found = fieldsMap[info.Key]; found {
-			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
-			return nil, errors.New(msg)
-		}
-
-		info.Id = len(fieldsList)
-		fieldsList = append(fieldsList, info)
-		fieldsMap[info.Key] = info
-	}
-
-	sinfo = &structInfo{
-		FieldsMap:  fieldsMap,
-		FieldsList: fieldsList,
-		InlineMap:  inlineMap,
-	}
-
-	fieldMapMutex.Lock()
-	structMap[st] = sinfo
-	fieldMapMutex.Unlock()
-	return sinfo, nil
-}
-
-// IsZeroer is used to check whether an object is zero to
-// determine whether it should be omitted when marshaling
-// with the omitempty flag. One notable implementation
-// is time.Time.
-type IsZeroer interface {
-	IsZero() bool
-}
-
-func isZero(v reflect.Value) bool {
-	kind := v.Kind()
-	if z, ok := v.Interface().(IsZeroer); ok {
-		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
-			return true
-		}
-		return z.IsZero()
-	}
-	switch kind {
-	case reflect.String:
-		return len(v.String()) == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	case reflect.Slice:
-		return v.Len() == 0
-	case reflect.Map:
-		return v.Len() == 0
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Struct:
-		vt := v.Type()
-		for i := v.NumField() - 1; i >= 0; i-- {
-			if vt.Field(i).PkgPath != "" {
-				continue // Private field
-			}
-			if !isZero(v.Field(i)) {
-				return false
-			}
-		}
-		return true
-	}
-	return false
-}
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 0000000..2683e4b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/NOTICE
rename to vendor/gopkg.in/yaml.v3/NOTICE
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v3/README.md
similarity index 66%
rename from vendor/gopkg.in/yaml.v2/README.md
rename to vendor/gopkg.in/yaml.v3/README.md
index b50c6e8..08eb1ba 100644
--- a/vendor/gopkg.in/yaml.v2/README.md
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -12,7 +12,23 @@
 Compatibility
 -------------
 
-The yaml package supports most of YAML 1.1 and 1.2, including support for
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+   decoded into a typed bool value. Otherwise they behave as a string. Booleans
+   in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+   as specified in YAML 1.2, because most parsers still use the old format.
+   Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package as it's clearly a poor choice.
+
+Beyond the points listed above, the package otherwise supports most
+of YAML
+1.2, including support for
 anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
 implemented, and base-60 floats from YAML 1.1 are purposefully not
 supported since they're a poor design and are gone in YAML 1.2.
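
Reviewer note: the compatibility bullets above are the main behavioural change picked up by this vendor bump. A minimal sketch of the bool handling, assuming only the vendored gopkg.in/yaml.v3 import:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// Decoding into a typed bool still accepts the YAML 1.1 forms (yes/no, on/off).
	var typed struct {
		Flag bool `yaml:"flag"`
	}
	if err := yaml.Unmarshal([]byte("flag: yes"), &typed); err != nil {
		panic(err)
	}
	fmt.Println(typed.Flag) // true

	// Decoding into an untyped value follows YAML 1.2: "yes" stays a string.
	var untyped map[string]interface{}
	if err := yaml.Unmarshal([]byte("flag: yes"), &untyped); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", untyped["flag"], untyped["flag"]) // string yes
}
```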
@@ -20,29 +36,30 @@
 Installation and usage
 ----------------------
 
-The import path for the package is *gopkg.in/yaml.v2*.
+The import path for the package is *gopkg.in/yaml.v3*.
 
 To install it, run:
 
-    go get gopkg.in/yaml.v2
+    go get gopkg.in/yaml.v3
 
 API documentation
 -----------------
 
 If opened in a browser, the import path itself leads to the API documentation:
 
-  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
 
 API stability
 -------------
 
-The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
 
 
 License
 -------
 
-The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+The yaml package is licensed under the MIT and Apache 2.0 licenses.
+Please see the LICENSE file for details.
 
 
 Example
@@ -55,7 +72,7 @@
         "fmt"
         "log"
 
-        "gopkg.in/yaml.v2"
+        "gopkg.in/yaml.v3"
 )
 
 var data = `
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
similarity index 93%
rename from vendor/gopkg.in/yaml.v2/apic.go
rename to vendor/gopkg.in/yaml.v3/apic.go
index 1f7e87e..ae7d049 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -1,3 +1,25 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 import (
@@ -86,6 +108,7 @@
 		raw_buffer: make([]byte, 0, output_raw_buffer_size),
 		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
 		events:     make([]yaml_event_t, 0, initial_queue_size),
+		best_width: -1,
 	}
 }
 
@@ -138,7 +161,7 @@
 	emitter.canonical = canonical
 }
 
-//// Set the indentation increment.
+// Set the indentation increment.
 func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
 	if indent < 2 || indent > 9 {
 		indent = 2
@@ -288,29 +311,14 @@
 	}
 }
 
-///*
-// * Create ALIAS.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
-//{
-//    mark yaml_mark_t = { 0, 0, 0 }
-//    anchor_copy *yaml_char_t = NULL
-//
-//    assert(event) // Non-NULL event object is expected.
-//    assert(anchor) // Non-NULL anchor is expected.
-//
-//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
-//
-//    anchor_copy = yaml_strdup(anchor)
-//    if (!anchor_copy)
-//        return 0
-//
-//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
-//
-//    return 1
-//}
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+	*event = yaml_event_t{
+		typ:    yaml_ALIAS_EVENT,
+		anchor: anchor,
+	}
+	return true
+}
 
 // Create SCALAR.
 func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 0000000..df36e3a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,950 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *Node
+	anchors  map[string]*Node
+	doneInit bool
+	textless bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+		// Scanner errors don't advance the line number before returning the error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line number before returning the error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	case yaml_TAIL_COMMENT_EVENT:
+		panic("internal error: unexpected tail comment event (please report)")
+	default:
+		panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+	}
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+	var style Style
+	if tag != "" && tag != "!" {
+		tag = shortTag(tag)
+		style = TaggedStyle
+	} else if defaultTag != "" {
+		tag = defaultTag
+	} else if kind == ScalarNode {
+		tag, _ = resolve("", value)
+	}
+	n := &Node{
+		Kind:  kind,
+		Tag:   tag,
+		Value: value,
+		Style: style,
+	}
+	if !p.textless {
+		n.Line = p.event.start_mark.line + 1
+		n.Column = p.event.start_mark.column + 1
+		n.HeadComment = string(p.event.head_comment)
+		n.LineComment = string(p.event.line_comment)
+		n.FootComment = string(p.event.foot_comment)
+	}
+	return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+	child := p.parse()
+	parent.Content = append(parent.Content, child)
+	return child
+}
+
+func (p *parser) document() *Node {
+	n := p.node(DocumentNode, "", "", "")
+	p.doc = n
+	p.expect(yaml_DOCUMENT_START_EVENT)
+	p.parseChild(n)
+	if p.peek() == yaml_DOCUMENT_END_EVENT {
+		n.FootComment = string(p.event.foot_comment)
+	}
+	p.expect(yaml_DOCUMENT_END_EVENT)
+	return n
+}
+
+func (p *parser) alias() *Node {
+	n := p.node(AliasNode, "", "", string(p.event.anchor))
+	n.Alias = p.anchors[n.Value]
+	if n.Alias == nil {
+		failf("unknown anchor '%s' referenced", n.Value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
+	return n
+}
+
+func (p *parser) scalar() *Node {
+	var parsedStyle = p.event.scalar_style()
+	var nodeStyle Style
+	switch {
+	case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+		nodeStyle = DoubleQuotedStyle
+	case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+		nodeStyle = SingleQuotedStyle
+	case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+		nodeStyle = LiteralStyle
+	case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+		nodeStyle = FoldedStyle
+	}
+	var nodeValue = string(p.event.value)
+	var nodeTag = string(p.event.tag)
+	var defaultTag string
+	if nodeStyle == 0 {
+		if nodeValue == "<<" {
+			defaultTag = mergeTag
+		}
+	} else {
+		defaultTag = strTag
+	}
+	n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+	n.Style |= nodeStyle
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SCALAR_EVENT)
+	return n
+}
+
+func (p *parser) sequence() *Node {
+	n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+	if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+		n.Style |= FlowStyle
+	}
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
+		p.parseChild(n)
+	}
+	n.LineComment = string(p.event.line_comment)
+	n.FootComment = string(p.event.foot_comment)
+	p.expect(yaml_SEQUENCE_END_EVENT)
+	return n
+}
+
+func (p *parser) mapping() *Node {
+	n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+	block := true
+	if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+		block = false
+		n.Style |= FlowStyle
+	}
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
+		k := p.parseChild(n)
+		if block && k.FootComment != "" {
+			// Must be a foot comment for the prior value when being dedented.
+			if len(n.Content) > 2 {
+				n.Content[len(n.Content)-3].FootComment = k.FootComment
+				k.FootComment = ""
+			}
+		}
+		v := p.parseChild(n)
+		if k.FootComment == "" && v.FootComment != "" {
+			k.FootComment = v.FootComment
+			v.FootComment = ""
+		}
+		if p.peek() == yaml_TAIL_COMMENT_EVENT {
+			if k.FootComment == "" {
+				k.FootComment = string(p.event.foot_comment)
+			}
+			p.expect(yaml_TAIL_COMMENT_EVENT)
+		}
+	}
+	n.LineComment = string(p.event.line_comment)
+	n.FootComment = string(p.event.foot_comment)
+	if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+		n.Content[len(n.Content)-2].FootComment = n.FootComment
+		n.FootComment = ""
+	}
+	p.expect(yaml_MAPPING_END_EVENT)
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *Node
+	aliases map[*Node]bool
+	terrors []string
+
+	stringMapType  reflect.Type
+	generalMapType reflect.Type
+
+	knownFields bool
+	uniqueKeys  bool
+	decodeCount int
+	aliasCount  int
+	aliasDepth  int
+}
+
+var (
+	nodeType       = reflect.TypeOf(Node{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	stringMapType  = reflect.TypeOf(map[string]interface{}{})
+	generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = generalMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+	d := &decoder{
+		stringMapType:  stringMapType,
+		generalMapType: generalMapType,
+		uniqueKeys:     true,
+	}
+	d.aliases = make(map[*Node]bool)
+	return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+	if n.Tag != "" {
+		tag = n.Tag
+	}
+	value := n.Value
+	if tag != seqTag && tag != mapTag {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+	err := u.UnmarshalYAML(n)
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
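
The two code paths above cover the v3 Unmarshaler (which receives the raw *yaml.Node) and the v2-era function-based interface kept for backwards compatibility. A hedged sketch of the v3 style, with a hypothetical Port type that is not part of this change:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Port is illustrative only; any consumer type implementing Unmarshaler works.
type Port struct {
	Number int
}

// UnmarshalYAML receives the raw node, so validation errors can reference the
// offending line, which is what callUnmarshaler above relays to the caller.
func (p *Port) UnmarshalYAML(n *yaml.Node) error {
	var raw int
	if err := n.Decode(&raw); err != nil {
		return err
	}
	if raw < 1 || raw > 65535 {
		return fmt.Errorf("line %d: port %d out of range", n.Line, raw)
	}
	p.Number = raw
	return nil
}

func main() {
	var cfg struct {
		GrpcPort Port `yaml:"grpc_port"`
	}
	if err := yaml.Unmarshal([]byte("grpc_port: 50051"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.GrpcPort.Number) // 50051
}
```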
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.ShortTag() == nullTag {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			outi := out.Addr().Interface()
+			if u, ok := outi.(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+			if u, ok := outi.(obsoleteUnmarshaler); ok {
+				good = d.callObsoleteUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+	if n.ShortTag() == nullTag {
+		return reflect.Value{}
+	}
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					v.Set(reflect.New(v.Type().Elem()))
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+const (
+	// 400,000 decode operations is ~500kb of dense object declarations, or
+	// ~5kb of dense object declarations with 10000% alias expansion
+	alias_ratio_range_low = 400000
+
+	// 4,000,000 decode operations is ~5MB of dense object declarations, or
+	// ~4.5MB of dense object declarations with 10% alias expansion
+	alias_ratio_range_high = 4000000
+
+	// alias_ratio_range is the range over which we scale allowed alias ratios
+	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+	switch {
+	case decodeCount <= alias_ratio_range_low:
+		// allow 99% to come from alias expansion for small-to-medium documents
+		return 0.99
+	case decodeCount >= alias_ratio_range_high:
+		// allow 10% to come from alias expansion for very large documents
+		return 0.10
+	default:
+		// scale smoothly from 99% down to 10% over the range.
+		// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+		// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+		return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+	}
+}
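
The constants and comments above define the alias-expansion guard applied in unmarshal below. A standalone copy of the scaling, shown only to illustrate the numbers (allowedAliasRatio itself is unexported):

```go
package main

import "fmt"

// Illustrative copy of the unexported helper above; not an API of gopkg.in/yaml.v3.
func allowedAliasRatio(decodeCount int) float64 {
	const low, high = 400000, 4000000
	switch {
	case decodeCount <= low:
		return 0.99
	case decodeCount >= high:
		return 0.10
	default:
		// Linear scale from 99% down to 10% across the range.
		return 0.99 - 0.89*(float64(decodeCount-low)/float64(high-low))
	}
}

func main() {
	for _, n := range []int{1000, 400000, 2200000, 4000000} {
		fmt.Printf("decodeCount=%-8d allowed alias ratio=%.2f\n", n, allowedAliasRatio(n))
	}
}
```

Documents that exceed the allowed ratio are rejected with "document contains excessive aliasing", which is the billion-laughs protection exercised in unmarshal.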
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+	d.decodeCount++
+	if d.aliasDepth > 0 {
+		d.aliasCount++
+	}
+	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+		failf("document contains excessive aliasing")
+	}
+	if out.Type() == nodeType {
+		out.Set(reflect.ValueOf(n).Elem())
+		return true
+	}
+	switch n.Kind {
+	case DocumentNode:
+		return d.document(n, out)
+	case AliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.Kind {
+	case ScalarNode:
+		good = d.scalar(n, out)
+	case MappingNode:
+		good = d.mapping(n, out)
+	case SequenceNode:
+		good = d.sequence(n, out)
+	case 0:
+		if n.IsZero() {
+			return d.null(out)
+		}
+		fallthrough
+	default:
+		failf("cannot decode node with unknown kind %d", n.Kind)
+	}
+	return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+	if len(n.Content) == 1 {
+		d.doc = n
+		d.unmarshal(n.Content[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
+		failf("anchor '%s' value contains itself", n.Value)
+	}
+	d.aliases[n] = true
+	d.aliasDepth++
+	good = d.unmarshal(n.Alias, out)
+	d.aliasDepth--
+	delete(d.aliases, n)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+	if out.CanAddr() {
+		switch out.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			out.Set(reflect.Zero(out.Type()))
+			return true
+		}
+	}
+	return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+	var tag string
+	var resolved interface{}
+	if n.indicatedString() {
+		tag = strTag
+		resolved = n.Value
+	} else {
+		tag, resolved = resolve(n.Tag, n.Value)
+		if tag == binaryTag {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		return d.null(out)
+	}
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == binaryTag {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.Value)
+			}
+			err := u.UnmarshalText(text)
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == binaryTag {
+			out.SetString(resolved.(string))
+			return true
+		}
+		out.SetString(n.Value)
+		return true
+	case reflect.Interface:
+		out.Set(reflect.ValueOf(resolved))
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		// This used to work in v2, but it's very unfriendly.
+		isDuration := out.Type() == durationType
+
+		switch resolved := resolved.(type) {
+		case int:
+			if !isDuration && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !isDuration && !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		case string:
+			// This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+			// It only works if explicitly attempting to unmarshal into a typed bool value.
+			switch resolved {
+			case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+				out.SetBool(true)
+				return true
+			case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+				out.SetBool(false)
+				return true
+			}
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		panic("yaml internal error: please report the issue")
+	}
+	d.terror(n, tag, out)
+	return false
+}
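
The integer cases above deliberately exclude time.Duration (the isDuration check), so only the string form is accepted. A small sketch of the observable difference:

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3"
)

func main() {
	var cfg struct {
		Timeout time.Duration `yaml:"timeout"`
	}

	// String scalars go through time.ParseDuration.
	if err := yaml.Unmarshal([]byte("timeout: 2m30s"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout) // 2m30s

	// A bare integer is rejected for time.Duration in v3 (v2 treated it as nanoseconds).
	err := yaml.Unmarshal([]byte("timeout: 150"), &cfg)
	fmt.Println(err != nil) // true: a *yaml.TypeError is returned
}
```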
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+	l := len(n.Content)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, seqTag, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.Content[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+	l := len(n.Content)
+	if d.uniqueKeys {
+		nerrs := len(d.terrors)
+		for i := 0; i < l; i += 2 {
+			ni := n.Content[i]
+			for j := i + 2; j < l; j += 2 {
+				nj := n.Content[j]
+				if ni.Kind == nj.Kind && ni.Value == nj.Value {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+				}
+			}
+		}
+		if len(d.terrors) > nerrs {
+			return false
+		}
+	}
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		iface := out
+		if isStringMap(n) {
+			out = reflect.MakeMap(d.stringMapType)
+		} else {
+			out = reflect.MakeMap(d.generalMapType)
+		}
+		iface.Set(out)
+	default:
+		d.terror(n, mapTag, out)
+		return false
+	}
+
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	stringMapType := d.stringMapType
+	generalMapType := d.generalMapType
+	if outt.Elem() == ifaceType {
+		if outt.Key().Kind() == reflect.String {
+			d.stringMapType = outt
+		} else if outt.Key() == ifaceType {
+			d.generalMapType = outt
+		}
+	}
+
+	mapIsNew := false
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+		mapIsNew = true
+	}
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.Content[i]) {
+			d.merge(n.Content[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.Content[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+				out.SetMapIndex(k, e)
+			}
+		}
+	}
+	d.stringMapType = stringMapType
+	d.generalMapType = generalMapType
+	return true
+}
+
+func isStringMap(n *Node) bool {
+	if n.Kind != MappingNode {
+		return false
+	}
+	l := len(n.Content)
+	for i := 0; i < l; i += 2 {
+		if n.Content[i].ShortTag() != strTag {
+			return false
+		}
+	}
+	return true
+}
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	for _, index := range sinfo.InlineUnmarshalers {
+		field := d.fieldByIndex(n, out, index)
+		d.prepare(n, field)
+	}
+
+	var doneFields []bool
+	if d.uniqueKeys {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
+	name := settableValueOf("")
+	l := len(n.Content)
+	for i := 0; i < l; i += 2 {
+		ni := n.Content[i]
+		if isMerge(ni) {
+			d.merge(n.Content[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.uniqueKeys {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = d.fieldByIndex(n, out, info.Inline)
+			}
+			d.unmarshal(n.Content[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.Content[i+1], value)
+			inlineMap.SetMapIndex(name, value)
+		} else if d.knownFields {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+		}
+	}
+	return true
+}
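
mappingStruct honours both uniqueKeys and knownFields; the latter is only reachable through the public Decoder. A sketch with illustrative field names:

```go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	dec := yaml.NewDecoder(strings.NewReader("name: ofagent\nreplicaz: 2\n"))
	dec.KnownFields(true) // sets d.knownFields: unknown mapping keys become errors

	var cfg struct {
		Name     string `yaml:"name"`
		Replicas int    `yaml:"replicas"`
	}
	err := dec.Decode(&cfg)
	fmt.Println(err) // reports that field "replicaz" is not found in the target struct type
}
```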
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *Node, out reflect.Value) {
+	switch n.Kind {
+	case MappingNode:
+		d.unmarshal(n, out)
+	case AliasNode:
+		if n.Alias != nil && n.Alias.Kind != MappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case SequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.Content) - 1; i >= 0; i-- {
+			ni := n.Content[i]
+			if ni.Kind == AliasNode {
+				if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+					failWantMap()
+				}
+			} else if ni.Kind != MappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
+
+func isMerge(n *Node) bool {
+	return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
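
isMerge and merge above implement the `<<` merge key, walking sequences of maps backwards so that earlier entries take precedence. A quick sketch of the resulting behaviour (the adapter/host keys are illustrative, not taken from this repository's configuration):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const doc = `
defaults: &defaults
  adapter: grpc
  host: localhost
development:
  <<: *defaults
  host: voltha-dev
`

func main() {
	var cfg map[string]map[string]string
	if err := yaml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	// Keys written after the merge override merged values; the rest is inherited.
	fmt.Println(cfg["development"]["adapter"]) // grpc
	fmt.Println(cfg["development"]["host"])    // voltha-dev
}
```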
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
similarity index 79%
rename from vendor/gopkg.in/yaml.v2/emitterc.go
rename to vendor/gopkg.in/yaml.v3/emitterc.go
index a1c2cc5..0f47c9c 100644
--- a/vendor/gopkg.in/yaml.v2/emitterc.go
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 import (
@@ -43,8 +65,13 @@
 	default:
 		panic("unknown line break setting")
 	}
+	if emitter.column == 0 {
+		emitter.space_above = true
+	}
 	emitter.column = 0
 	emitter.line++
+	// [Go] Do this here and below and drop from everywhere else (see commented lines).
+	emitter.indention = true
 	return true
 }
 
@@ -97,8 +124,13 @@
 		if !write(emitter, s, i) {
 			return false
 		}
+		if emitter.column == 0 {
+			emitter.space_above = true
+		}
 		emitter.column = 0
 		emitter.line++
+		// [Go] Do this here and above and drop from everywhere else (see commented lines).
+		emitter.indention = true
 	}
 	return true
 }
@@ -203,7 +235,14 @@
 			emitter.indent = 0
 		}
 	} else if !indentless {
-		emitter.indent += emitter.best_indent
+		// [Go] This was changed so that indentations are more regular.
+		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+			// The first indent inside a sequence will just skip the "- " indicator.
+			emitter.indent += 2
+		} else {
+			// Everything else aligns to the chosen indentation.
+			emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
+		}
 	}
 	return true
 }
@@ -228,16 +267,22 @@
 		return yaml_emitter_emit_document_end(emitter, event)
 
 	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
-		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
 
 	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
-		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
 
 	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
-		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
 
 	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
-		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
 
 	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
 		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
@@ -298,6 +343,8 @@
 	emitter.column = 0
 	emitter.whitespace = true
 	emitter.indention = true
+	emitter.space_above = true
+	emitter.foot_indent = -1
 
 	if emitter.encoding != yaml_UTF8_ENCODING {
 		if !yaml_emitter_write_bom(emitter) {
@@ -392,13 +439,22 @@
 			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
 				return false
 			}
-			if emitter.canonical {
+			if emitter.canonical || true {
 				if !yaml_emitter_write_indent(emitter) {
 					return false
 				}
 			}
 		}
 
+		if len(emitter.head_comment) > 0 {
+			if !yaml_emitter_process_head_comment(emitter) {
+				return false
+			}
+			if !put_break(emitter) {
+				return false
+			}
+		}
+
 		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
 		return true
 	}
@@ -425,7 +481,20 @@
 // Expect the root node.
 func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
 	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
-	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
 }
 
 // Expect DOCUMENT-END.
@@ -433,6 +502,12 @@
 	if event.typ != yaml_DOCUMENT_END_EVENT {
 		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
 	}
+	// [Go] Force document foot separation.
+	emitter.foot_indent = 0
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	emitter.foot_indent = -1
 	if !yaml_emitter_write_indent(emitter) {
 		return false
 	}
@@ -454,7 +529,7 @@
 }
 
 // Expect a flow item node.
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
 	if first {
 		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
 			return false
@@ -466,13 +541,15 @@
 	}
 
 	if event.typ == yaml_SEQUENCE_END_EVENT {
-		emitter.flow_level--
-		emitter.indent = emitter.indents[len(emitter.indents)-1]
-		emitter.indents = emitter.indents[:len(emitter.indents)-1]
-		if emitter.canonical && !first {
+		if emitter.canonical && !first && !trail {
 			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
 				return false
 			}
+		}
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.column == 0 || emitter.canonical && !first {
 			if !yaml_emitter_write_indent(emitter) {
 				return false
 			}
@@ -480,29 +557,62 @@
 		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
 			return false
 		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
 		emitter.state = emitter.states[len(emitter.states)-1]
 		emitter.states = emitter.states[:len(emitter.states)-1]
 
 		return true
 	}
 
-	if !first {
+	if !first && !trail {
 		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
 			return false
 		}
 	}
 
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
 	if emitter.canonical || emitter.column > emitter.best_width {
 		if !yaml_emitter_write_indent(emitter) {
 			return false
 		}
 	}
-	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
-	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
 }
 
 // Expect a flow key node.
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
 	if first {
 		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
 			return false
@@ -514,13 +624,18 @@
 	}
 
 	if event.typ == yaml_MAPPING_END_EVENT {
+		if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		if !yaml_emitter_process_head_comment(emitter) {
+			return false
+		}
 		emitter.flow_level--
 		emitter.indent = emitter.indents[len(emitter.indents)-1]
 		emitter.indents = emitter.indents[:len(emitter.indents)-1]
 		if emitter.canonical && !first {
-			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
-				return false
-			}
 			if !yaml_emitter_write_indent(emitter) {
 				return false
 			}
@@ -528,16 +643,33 @@
 		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
 			return false
 		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
 		emitter.state = emitter.states[len(emitter.states)-1]
 		emitter.states = emitter.states[:len(emitter.states)-1]
 		return true
 	}
 
-	if !first {
+	if !first && !trail {
 		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
 			return false
 		}
 	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
 	if emitter.canonical || emitter.column > emitter.best_width {
 		if !yaml_emitter_write_indent(emitter) {
 			return false
@@ -571,14 +703,32 @@
 			return false
 		}
 	}
-	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
-	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
 }
 
 // Expect a block item node.
 func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
 	if first {
-		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
 			return false
 		}
 	}
@@ -589,6 +739,9 @@
 		emitter.states = emitter.states[:len(emitter.states)-1]
 		return true
 	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
 	if !yaml_emitter_write_indent(emitter) {
 		return false
 	}
@@ -596,7 +749,16 @@
 		return false
 	}
 	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
-	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
 }
 
 // Expect a block key node.
@@ -606,6 +768,9 @@
 			return false
 		}
 	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
 	if event.typ == yaml_MAPPING_END_EVENT {
 		emitter.indent = emitter.indents[len(emitter.indents)-1]
 		emitter.indents = emitter.indents[:len(emitter.indents)-1]
@@ -616,6 +781,13 @@
 	if !yaml_emitter_write_indent(emitter) {
 		return false
 	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
 	if yaml_emitter_check_simple_key(emitter) {
 		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
 		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
@@ -641,8 +813,42 @@
 			return false
 		}
 	}
+	if len(emitter.key_line_comment) > 0 {
+		// [Go] Line comments are generally associated with the value, but when there's
+		//      no value on the same line as a mapping key they end up attached to the
+		//      key itself.
+		if event.typ == yaml_SCALAR_EVENT {
+			if len(emitter.line_comment) == 0 {
+				// A scalar is coming and it has no line comments by itself yet,
+				// so just let it handle the line comment as usual. If it has a
+				// line comment, we can't have both so the one from the key is lost.
+				emitter.line_comment = emitter.key_line_comment
+				emitter.key_line_comment = nil
+			}
+		} else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+			// An indented block follows, so write the comment right now.
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+			if !yaml_emitter_process_line_comment(emitter) {
+				return false
+			}
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+		}
+	}
 	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
-	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
 }
 
 // Expect a node.
@@ -908,6 +1114,71 @@
 	panic("unknown scalar style")
 }
 
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
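
These three helpers are what let comments survive through the Node API introduced in v3. A hedged sketch of how they surface publicly (exact output spacing may vary slightly):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	doc := &yaml.Node{
		Kind: yaml.MappingNode,
		Content: []*yaml.Node{
			{Kind: yaml.ScalarNode, Value: "replicas", HeadComment: "Number of ofagent replicas"},
			{Kind: yaml.ScalarNode, Value: "3", LineComment: "tune per deployment"},
		},
	}
	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Roughly:
	// # Number of ofagent replicas
	// replicas: 3 # tune per deployment
}
```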
+
 // Check if a %YAML directive is valid.
 func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
 	if version_directive.major != 1 || version_directive.minor != 1 {
@@ -987,6 +1258,7 @@
 		flow_indicators    = false
 		line_breaks        = false
 		special_characters = false
+		tab_characters     = false
 
 		leading_space  = false
 		leading_break  = false
@@ -1055,7 +1327,9 @@
 			}
 		}
 
-		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+		if value[i] == '\t' {
+			tab_characters = true
+		} else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
 			special_characters = true
 		}
 		if is_space(value, i) {
@@ -1110,10 +1384,12 @@
 		emitter.scalar_data.block_plain_allowed = false
 		emitter.scalar_data.single_quoted_allowed = false
 	}
-	if space_break || special_characters {
+	if space_break || tab_characters || special_characters {
 		emitter.scalar_data.flow_plain_allowed = false
 		emitter.scalar_data.block_plain_allowed = false
 		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
 		emitter.scalar_data.block_allowed = false
 	}
 	if line_breaks {
@@ -1137,6 +1413,19 @@
 	emitter.tag_data.suffix = nil
 	emitter.scalar_data.value = nil
 
+	if len(event.head_comment) > 0 {
+		emitter.head_comment = event.head_comment
+	}
+	if len(event.line_comment) > 0 {
+		emitter.line_comment = event.line_comment
+	}
+	if len(event.foot_comment) > 0 {
+		emitter.foot_comment = event.foot_comment
+	}
+	if len(event.tail_comment) > 0 {
+		emitter.tail_comment = event.tail_comment
+	}
+
 	switch event.typ {
 	case yaml_ALIAS_EVENT:
 		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
@@ -1208,13 +1497,20 @@
 			return false
 		}
 	}
+	if emitter.foot_indent == indent {
+		if !put_break(emitter) {
+			return false
+		}
+	}
 	for emitter.column < indent {
 		if !put(emitter, ' ') {
 			return false
 		}
 	}
 	emitter.whitespace = true
-	emitter.indention = true
+	//emitter.indention = true
+	emitter.space_above = false
+	emitter.foot_indent = -1
 	return true
 }
 
@@ -1311,7 +1607,7 @@
 }
 
 func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-	if !emitter.whitespace {
+	if len(value) > 0 && !emitter.whitespace {
 		if !put(emitter, ' ') {
 			return false
 		}
@@ -1341,7 +1637,7 @@
 			if !write_break(emitter, value, &i) {
 				return false
 			}
-			emitter.indention = true
+			//emitter.indention = true
 			breaks = true
 		} else {
 			if breaks {
@@ -1358,7 +1654,9 @@
 		}
 	}
 
-	emitter.whitespace = false
+	if len(value) > 0 {
+		emitter.whitespace = false
+	}
 	emitter.indention = false
 	if emitter.root_context {
 		emitter.open_ended = true
@@ -1397,7 +1695,7 @@
 			if !write_break(emitter, value, &i) {
 				return false
 			}
-			emitter.indention = true
+			//emitter.indention = true
 			breaks = true
 		} else {
 			if breaks {
@@ -1596,10 +1894,10 @@
 	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
 		return false
 	}
-	if !put_break(emitter) {
+	if !yaml_emitter_process_line_comment(emitter) {
 		return false
 	}
-	emitter.indention = true
+	//emitter.indention = true
 	emitter.whitespace = true
 	breaks := true
 	for i := 0; i < len(value); {
@@ -1607,7 +1905,7 @@
 			if !write_break(emitter, value, &i) {
 				return false
 			}
-			emitter.indention = true
+			//emitter.indention = true
 			breaks = true
 		} else {
 			if breaks {
@@ -1633,11 +1931,11 @@
 	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
 		return false
 	}
-
-	if !put_break(emitter) {
+	if !yaml_emitter_process_line_comment(emitter) {
 		return false
 	}
-	emitter.indention = true
+
+	//emitter.indention = true
 	emitter.whitespace = true
 
 	breaks := true
@@ -1658,7 +1956,7 @@
 			if !write_break(emitter, value, &i) {
 				return false
 			}
-			emitter.indention = true
+			//emitter.indention = true
 			breaks = true
 		} else {
 			if breaks {
@@ -1683,3 +1981,40 @@
 	}
 	return true
 }
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+	breaks := false
+	pound := false
+	for i := 0; i < len(comment); {
+		if is_break(comment, i) {
+			if !write_break(emitter, comment, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+			pound = false
+		} else {
+			if breaks && !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !pound {
+				if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+					return false
+				}
+				pound = true
+			}
+			if !write(emitter, comment, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	if !breaks && !put_break(emitter) {
+		return false
+	}
+
+	emitter.whitespace = true
+	//emitter.indention = true
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 0000000..de9e72a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+type encoder struct {
+	emitter  yaml_emitter_t
+	event    yaml_event_t
+	out      []byte
+	flow     bool
+	indent   int
+	doneInit bool
+}
+
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	if e.indent == 0 {
+		e.indent = 4
+	}
+	e.emitter.best_indent = e.indent
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
+	e.emitter.open_ended = false
+	yaml_stream_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	var node *Node
+	if in.IsValid() {
+		node, _ = in.Interface().(*Node)
+	}
+	if node != nil && node.Kind == DocumentNode {
+		e.nodev(in)
+	} else {
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.emit()
+		e.marshal(tag, in)
+		yaml_document_end_event_initialize(&e.event, true)
+		e.emit()
+	}
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	tag = shortTag(tag)
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	switch value := iface.(type) {
+	case *Node:
+		e.nodev(in)
+		return
+	case Node:
+		if !in.CanAddr() {
+			var n = reflect.New(in.Type()).Elem()
+			n.Set(in)
+			in = n
+		}
+		e.nodev(in.Addr())
+		return
+	case time.Time:
+		e.timev(tag, in)
+		return
+	case *time.Time:
+		e.timev(tag, in.Elem())
+		return
+	case time.Duration:
+		e.stringv(tag, reflect.ValueOf(value.String()))
+		return
+	case Marshaler:
+		v, err := value.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		e.marshal(tag, reflect.ValueOf(v))
+		return
+	case encoding.TextMarshaler:
+		text, err := value.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		e.marshal(tag, in.Elem())
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		e.marshal(tag, in.Elem())
+	case reflect.Struct:
+		e.structv(tag, in)
+	case reflect.Slice, reflect.Array:
+		e.slicev(tag, in)
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		e.intv(tag, in)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
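+
+// inlineExample is an illustrative (hypothetical) type, not part of the
+// upstream file: it shows the field attributes that structv above consumes
+// via getStructInfo: renamed keys, omitempty, flow style, and an inline map.
+type inlineExample struct {
+	Name  string            `yaml:"name"`
+	Count int               `yaml:"count,omitempty"`
+	Tags  []string          `yaml:"tags,flow"`
+	Extra map[string]string `yaml:",inline"`
+}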
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values should still be marshalled
+// as quoted strings for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output remains valid for
+// YAML 1.1 parsing.
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
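+
+// mustQuoteFor11 is an illustrative (hypothetical) helper, not part of the
+// upstream file: it mirrors the extra condition used by stringv below, which
+// falls back to double-quoted style for values such as "yes", "Off" or "1:30"
+// so that YAML 1.1 parsers do not read them back as booleans or base 60 floats.
+func mustQuoteFor11(s string) bool {
+	return isOldBool(s) || isBase60Float(s)
+}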
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value has been streamed. The last
+		// tail is processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+			tag = binaryTag
+			value = encodeBase64(value)
+		}
+
+		style := yaml_PLAIN_SCALAR_STYLE
+		switch {
+		case node.Style&DoubleQuotedStyle != 0:
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		case node.Style&SingleQuotedStyle != 0:
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		case node.Style&LiteralStyle != 0:
+			style = yaml_LITERAL_SCALAR_STYLE
+		case node.Style&FoldedStyle != 0:
+			style = yaml_FOLDED_SCALAR_STYLE
+		case strings.Contains(value, "\n"):
+			style = yaml_LITERAL_SCALAR_STYLE
+		case forceQuoting:
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+
+		e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+	default:
+		failf("cannot encode node with unknown kind %d", node.Kind)
+	}
+}
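+
+// commentedScalar is an illustrative (hypothetical) helper, not part of the
+// upstream file: the comment fields set here are forwarded by the node method
+// above into the head, line and foot comments of the emitted scalar event,
+// rendering roughly as "# above", then "value # beside", then "# below".
+func commentedScalar() *Node {
+	return &Node{
+		Kind:        ScalarNode,
+		Value:       "value",
+		HeadComment: "above",
+		LineComment: "beside",
+		FootComment: "below",
+	}
+}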
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
similarity index 72%
rename from vendor/gopkg.in/yaml.v2/go.mod
rename to vendor/gopkg.in/yaml.v3/go.mod
index 1934e87..f407ea3 100644
--- a/vendor/gopkg.in/yaml.v2/go.mod
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -1,4 +1,4 @@
-module "gopkg.in/yaml.v2"
+module "gopkg.in/yaml.v3"
 
 require (
 	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
similarity index 85%
rename from vendor/gopkg.in/yaml.v2/parserc.go
rename to vendor/gopkg.in/yaml.v3/parserc.go
index 81d05df..ac66fcc 100644
--- a/vendor/gopkg.in/yaml.v2/parserc.go
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 import (
@@ -45,11 +67,46 @@
 // Peek the next token in the token queue.
 func peek_token(parser *yaml_parser_t) *yaml_token_t {
 	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
-		return &parser.tokens[parser.tokens_head]
+		token := &parser.tokens[parser.tokens_head]
+		yaml_parser_unfold_comments(parser, token)
+		return token
 	}
 	return nil
 }
 
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments up to the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+	for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+		comment := &parser.comments[parser.comments_head]
+		if len(comment.head) > 0 {
+			if token.typ == yaml_BLOCK_END_TOKEN {
+				// No heads on ends, so keep comment.head for a follow-up token.
+				break
+			}
+			if len(parser.head_comment) > 0 {
+				parser.head_comment = append(parser.head_comment, '\n')
+			}
+			parser.head_comment = append(parser.head_comment, comment.head...)
+		}
+		if len(comment.foot) > 0 {
+			if len(parser.foot_comment) > 0 {
+				parser.foot_comment = append(parser.foot_comment, '\n')
+			}
+			parser.foot_comment = append(parser.foot_comment, comment.foot...)
+		}
+		if len(comment.line) > 0 {
+			if len(parser.line_comment) > 0 {
+				parser.line_comment = append(parser.line_comment, '\n')
+			}
+			parser.line_comment = append(parser.line_comment, comment.line...)
+		}
+		*comment = yaml_comment_t{}
+		parser.comments_head++
+	}
+}
+
 // Remove the next token from the queue (must be called after peek_token).
 func skip_token(parser *yaml_parser_t) {
 	parser.token_available = false
@@ -224,10 +281,32 @@
 		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
 		parser.state = yaml_PARSE_BLOCK_NODE_STATE
 
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			//      the header so the part before the last empty line goes into the
+			//      document header, while the bottom of it goes into a follow-up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
 		*event = yaml_event_t{
 			typ:        yaml_DOCUMENT_START_EVENT,
 			start_mark: token.start_mark,
 			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
 		}
 
 	} else if token.typ != yaml_STREAM_END_TOKEN {
@@ -284,6 +363,7 @@
 	if token == nil {
 		return false
 	}
+
 	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
 		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
 		token.typ == yaml_DOCUMENT_START_TOKEN ||
@@ -327,9 +407,25 @@
 		end_mark:   end_mark,
 		implicit:   implicit,
 	}
+	yaml_parser_set_event_comments(parser, event)
+	if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+		event.foot_comment = event.head_comment
+		event.head_comment = nil
+	}
 	return true
 }
 
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+	event.head_comment = parser.head_comment
+	event.line_comment = parser.line_comment
+	event.foot_comment = parser.foot_comment
+	parser.head_comment = nil
+	parser.line_comment = nil
+	parser.foot_comment = nil
+	parser.tail_comment = nil
+	parser.stem_comment = nil
+}
+
 // Parse the productions:
 // block_node_or_indentless_sequence    ::=
 //                          ALIAS
@@ -373,6 +469,7 @@
 			end_mark:   token.end_mark,
 			anchor:     token.value,
 		}
+		yaml_parser_set_event_comments(parser, event)
 		skip_token(parser)
 		return true
 	}
@@ -486,6 +583,7 @@
 			quoted_implicit: quoted_implicit,
 			style:           yaml_style_t(token.style),
 		}
+		yaml_parser_set_event_comments(parser, event)
 		skip_token(parser)
 		return true
 	}
@@ -502,6 +600,7 @@
 			implicit:   implicit,
 			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
 		}
+		yaml_parser_set_event_comments(parser, event)
 		return true
 	}
 	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
@@ -516,6 +615,7 @@
 			implicit:   implicit,
 			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
 		}
+		yaml_parser_set_event_comments(parser, event)
 		return true
 	}
 	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
@@ -530,6 +630,10 @@
 			implicit:   implicit,
 			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
 		}
+		if parser.stem_comment != nil {
+			event.head_comment = parser.stem_comment
+			parser.stem_comment = nil
+		}
 		return true
 	}
 	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
@@ -544,6 +648,10 @@
 			implicit:   implicit,
 			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
 		}
+		if parser.stem_comment != nil {
+			event.head_comment = parser.stem_comment
+			parser.stem_comment = nil
+		}
 		return true
 	}
 	if len(anchor) > 0 || len(tag) > 0 {
@@ -590,7 +698,9 @@
 
 	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
 		mark := token.end_mark
+		prior_head_len := len(parser.head_comment)
 		skip_token(parser)
+		yaml_parser_split_stem_comment(parser, prior_head_len)
 		token = peek_token(parser)
 		if token == nil {
 			return false
@@ -636,7 +746,9 @@
 
 	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
 		mark := token.end_mark
+		prior_head_len := len(parser.head_comment)
 		skip_token(parser)
+		yaml_parser_split_stem_comment(parser, prior_head_len)
 		token = peek_token(parser)
 		if token == nil {
 			return false
@@ -662,6 +774,32 @@
 	return true
 }
 
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not the individual
+// sequence or map entry as would be expected otherwise. To handle this case, the
+// previous head comment is moved aside as the stem comment.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+	if stem_len == 0 {
+		return
+	}
+
+	token := peek_token(parser)
+	if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+		return
+	}
+
+	parser.stem_comment = parser.head_comment[:stem_len]
+	if len(parser.head_comment) == stem_len {
+		parser.head_comment = nil
+	} else {
+		// Copy suffix to prevent very strange bugs if someone ever appends
+		// further bytes to the prefix in the stem_comment slice above.
+		parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+	}
+}
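+
+// stemCommentExample is an illustrative (hypothetical) constant, not part of
+// the upstream file: for input shaped like this, the head comment scanned with
+// the outer "-" is moved aside as the stem comment and later attached to the
+// nested sequence as a whole (parser.stem_comment is consumed when the nested
+// BLOCK-SEQUENCE-START or BLOCK-MAPPING-START event is built above), rather
+// than to the first entry of that nested sequence.
+const stemCommentExample = "" +
+	"- # describes the nested sequence\n" +
+	"  - a\n" +
+	"  - b\n"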
+
 // Parse the productions:
 // block_mapping        ::= BLOCK-MAPPING_START
 //                          *******************
@@ -684,6 +822,19 @@
 		return false
 	}
 
+	// [Go] A tail comment was left from the prior mapping value processed. Emit an event
+	//      as it needs to be processed with that value and not the following key.
+	if len(parser.tail_comment) > 0 {
+		*event = yaml_event_t{
+			typ:          yaml_TAIL_COMMENT_EVENT,
+			start_mark:   token.start_mark,
+			end_mark:     token.end_mark,
+			foot_comment: parser.tail_comment,
+		}
+		parser.tail_comment = nil
+		return true
+	}
+
 	if token.typ == yaml_KEY_TOKEN {
 		mark := token.end_mark
 		skip_token(parser)
@@ -709,6 +860,7 @@
 			start_mark: token.start_mark,
 			end_mark:   token.end_mark,
 		}
+		yaml_parser_set_event_comments(parser, event)
 		skip_token(parser)
 		return true
 	}
@@ -820,6 +972,7 @@
 		start_mark: token.start_mark,
 		end_mark:   token.end_mark,
 	}
+	yaml_parser_set_event_comments(parser, event)
 
 	skip_token(parser)
 	return true
@@ -959,6 +1112,7 @@
 		start_mark: token.start_mark,
 		end_mark:   token.end_mark,
 	}
+	yaml_parser_set_event_comments(parser, event)
 	skip_token(parser)
 	return true
 }
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
similarity index 91%
rename from vendor/gopkg.in/yaml.v2/readerc.go
rename to vendor/gopkg.in/yaml.v3/readerc.go
index 7c1f5fa..b7de0a8 100644
--- a/vendor/gopkg.in/yaml.v2/readerc.go
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -1,3 +1,25 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 import (
@@ -95,7 +117,7 @@
 
 	// [Go] This function was changed to guarantee the requested length size at EOF.
 	// The fact we need to do this is pretty awful, but the description above implies
-	// for that to be the case, and there are tests 
+	// for that to be the case, and there are tests
 
 	// If the EOF flag is set and the raw buffer is empty, do nothing.
 	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
similarity index 60%
rename from vendor/gopkg.in/yaml.v2/resolve.go
rename to vendor/gopkg.in/yaml.v3/resolve.go
index 4120e0c..64ae888 100644
--- a/vendor/gopkg.in/yaml.v2/resolve.go
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -1,3 +1,18 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package yaml
 
 import (
@@ -34,18 +49,14 @@
 		tag string
 		l   []string
 	}{
-		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
-		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
-		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
-		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
-		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
-		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
-		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
-		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
-		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
-		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
-		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
-		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+		{true, boolTag, []string{"true", "True", "TRUE"}},
+		{false, boolTag, []string{"false", "False", "FALSE"}},
+		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", mergeTag, []string{"<<"}},
 	}
 
 	m := resolveMap
@@ -56,11 +67,37 @@
 	}
 }
 
+const (
+	nullTag      = "!!null"
+	boolTag      = "!!bool"
+	strTag       = "!!str"
+	intTag       = "!!int"
+	floatTag     = "!!float"
+	timestampTag = "!!timestamp"
+	seqTag       = "!!seq"
+	mapTag       = "!!map"
+	binaryTag    = "!!binary"
+	mergeTag     = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+		ltag := longTag(stag)
+		longTags[stag] = ltag
+		shortTags[ltag] = stag
+	}
+}
+
 const longTagPrefix = "tag:yaml.org,2002:"
 
 func shortTag(tag string) string {
-	// TODO This can easily be made faster and produce less garbage.
 	if strings.HasPrefix(tag, longTagPrefix) {
+		if stag, ok := shortTags[tag]; ok {
+			return stag
+		}
 		return "!!" + tag[len(longTagPrefix):]
 	}
 	return tag
@@ -68,6 +105,9 @@
 
 func longTag(tag string) string {
 	if strings.HasPrefix(tag, "!!") {
+		if ltag, ok := longTags[tag]; ok {
+			return ltag
+		}
 		return longTagPrefix + tag[2:]
 	}
 	return tag
@@ -75,7 +115,7 @@
 
 func resolvableTag(tag string) bool {
 	switch tag {
-	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
 		return true
 	}
 	return false
@@ -84,23 +124,24 @@
 var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
 
 func resolve(tag string, in string) (rtag string, out interface{}) {
+	tag = shortTag(tag)
 	if !resolvableTag(tag) {
 		return tag, in
 	}
 
 	defer func() {
 		switch tag {
-		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+		case "", rtag, strTag, binaryTag:
 			return
-		case yaml_FLOAT_TAG:
-			if rtag == yaml_INT_TAG {
+		case floatTag:
+			if rtag == intTag {
 				switch v := out.(type) {
 				case int64:
-					rtag = yaml_FLOAT_TAG
+					rtag = floatTag
 					out = float64(v)
 					return
 				case int:
-					rtag = yaml_FLOAT_TAG
+					rtag = floatTag
 					out = float64(v)
 					return
 				}
@@ -115,7 +156,7 @@
 	if in != "" {
 		hint = resolveTable[in[0]]
 	}
-	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+	if hint != 0 && tag != strTag && tag != binaryTag {
 		// Handle things we can lookup in a map.
 		if item, ok := resolveMap[in]; ok {
 			return item.tag, item.value
@@ -133,17 +174,17 @@
 			// Not in the map, so maybe a normal float.
 			floatv, err := strconv.ParseFloat(in, 64)
 			if err == nil {
-				return yaml_FLOAT_TAG, floatv
+				return floatTag, floatv
 			}
 
 		case 'D', 'S':
 			// Int, float, or timestamp.
 			// Only try values as a timestamp if the value is unquoted or there's an explicit
 			// !!timestamp tag.
-			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+			if tag == "" || tag == timestampTag {
 				t, ok := parseTimestamp(in)
 				if ok {
-					return yaml_TIMESTAMP_TAG, t
+					return timestampTag, t
 				}
 			}
 
@@ -151,49 +192,76 @@
 			intv, err := strconv.ParseInt(plain, 0, 64)
 			if err == nil {
 				if intv == int64(int(intv)) {
-					return yaml_INT_TAG, int(intv)
+					return intTag, int(intv)
 				} else {
-					return yaml_INT_TAG, intv
+					return intTag, intv
 				}
 			}
 			uintv, err := strconv.ParseUint(plain, 0, 64)
 			if err == nil {
-				return yaml_INT_TAG, uintv
+				return intTag, uintv
 			}
 			if yamlStyleFloat.MatchString(plain) {
 				floatv, err := strconv.ParseFloat(plain, 64)
 				if err == nil {
-					return yaml_FLOAT_TAG, floatv
+					return floatTag, floatv
 				}
 			}
 			if strings.HasPrefix(plain, "0b") {
 				intv, err := strconv.ParseInt(plain[2:], 2, 64)
 				if err == nil {
 					if intv == int64(int(intv)) {
-						return yaml_INT_TAG, int(intv)
+						return intTag, int(intv)
 					} else {
-						return yaml_INT_TAG, intv
+						return intTag, intv
 					}
 				}
 				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
 				if err == nil {
-					return yaml_INT_TAG, uintv
+					return intTag, uintv
 				}
 			} else if strings.HasPrefix(plain, "-0b") {
-				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
 				if err == nil {
 					if true || intv == int64(int(intv)) {
-						return yaml_INT_TAG, int(intv)
+						return intTag, int(intv)
 					} else {
-						return yaml_INT_TAG, intv
+						return intTag, intv
+					}
+				}
+			}
+			// Octals as introduced in version 1.2 of the spec.
+			// Octals from the 1.1 spec, spelled as 0777, are still
+			// decoded by default in v3 as well for compatibility.
+			// May be dropped in v4 depending on how usage evolves.
+			if strings.HasPrefix(plain, "0o") {
+				intv, err := strconv.ParseInt(plain[2:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0o") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+				if err == nil {
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
 					}
 				}
 			}
 		default:
-			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
 		}
 	}
-	return yaml_STR_TAG, in
+	return strTag, in
 }
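+
+// resolveIntExamples is an illustrative (hypothetical) helper, not part of the
+// upstream file: it shows the integer spellings accepted by resolve above,
+// including the YAML 1.2 "0o" octal prefix handled by the new branch.
+func resolveIntExamples() {
+	_, a := resolve("", "0o777") // intTag, 511 (YAML 1.2 octal)
+	_, b := resolve("", "0777")  // intTag, 511 (YAML 1.1 octal, still accepted)
+	_, c := resolve("", "0b101") // intTag, 5 (binary)
+	_, _, _ = a, b, c
+}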
 
 // encodeBase64 encodes s as base64 that is broken up into multiple lines
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
similarity index 84%
rename from vendor/gopkg.in/yaml.v2/scannerc.go
rename to vendor/gopkg.in/yaml.v3/scannerc.go
index 077fd1d..ca00701 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 import (
@@ -489,6 +511,9 @@
 
 // Advance the buffer pointer.
 func skip(parser *yaml_parser_t) {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
 	parser.mark.index++
 	parser.mark.column++
 	parser.unread--
@@ -502,17 +527,22 @@
 		parser.mark.line++
 		parser.unread -= 2
 		parser.buffer_pos += 2
+		parser.newlines++
 	} else if is_break(parser.buffer, parser.buffer_pos) {
 		parser.mark.index++
 		parser.mark.column = 0
 		parser.mark.line++
 		parser.unread--
 		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+		parser.newlines++
 	}
 }
 
 // Copy a character to a string buffer and advance pointers.
 func read(parser *yaml_parser_t, s []byte) []byte {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
 	w := width(parser.buffer[parser.buffer_pos])
 	if w == 0 {
 		panic("invalid character sequence")
@@ -564,6 +594,7 @@
 	parser.mark.column = 0
 	parser.mark.line++
 	parser.unread--
+	parser.newlines++
 	return s
 }
 
@@ -626,30 +657,21 @@
 func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
 	// While we need more tokens to fetch, do it.
 	for {
-		// Check if we really need to fetch more tokens.
-		need_more_tokens := false
-
-		if parser.tokens_head == len(parser.tokens) {
-			// Queue is empty.
-			need_more_tokens = true
-		} else {
-			// Check if any potential simple key may occupy the head position.
-			if !yaml_parser_stale_simple_keys(parser) {
+		// [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments may be parsed in time to associate them
+		// with the tokens that are parsed before them, and also for line
+		// comments to be transformed into head comments in some edge cases.
+		if parser.tokens_head < len(parser.tokens)-2 {
+			// If a potential simple key is at the head position, we need to fetch
+			// the next token to disambiguate it.
+			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+			if !ok {
+				break
+			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
 				return false
+			} else if !valid {
+				break
 			}
-
-			for i := range parser.simple_keys {
-				simple_key := &parser.simple_keys[i]
-				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
-					need_more_tokens = true
-					break
-				}
-			}
-		}
-
-		// We are finished.
-		if !need_more_tokens {
-			break
 		}
 		// Fetch the next token.
 		if !yaml_parser_fetch_next_token(parser) {
@@ -662,7 +684,7 @@
 }
 
 // The dispatcher for token fetchers.
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
 	// Ensure that the buffer is initialized.
 	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
 		return false
@@ -673,18 +695,19 @@
 		return yaml_parser_fetch_stream_start(parser)
 	}
 
+	scan_mark := parser.mark
+
 	// Eat whitespaces and comments until we reach the next token.
 	if !yaml_parser_scan_to_next_token(parser) {
 		return false
 	}
 
-	// Remove obsolete potential simple keys.
-	if !yaml_parser_stale_simple_keys(parser) {
-		return false
-	}
+	// [Go] While unrolling indents, transform the head comments of prior
+	// indentation levels observed after scan_mark into foot comments at
+	// the respective indexes.
 
 	// Check the indentation level against the current column.
-	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+	if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
 		return false
 	}
 
@@ -717,6 +740,26 @@
 		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
 	}
 
+	comment_mark := parser.mark
+	if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+		// Associate any following comments with the prior token.
+		comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+	}
+	defer func() {
+		if !ok {
+			return
+		}
+		if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+			// Sequence indicators alone have no line comments; such a comment
+			// becomes a head comment for whatever follows.
+			return
+		}
+		if !yaml_parser_scan_line_comment(parser, comment_mark) {
+			ok = false
+			return
+		}
+	}()
+
 	// Is it the flow sequence start indicator?
 	if buf[pos] == '[' {
 		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
@@ -810,7 +853,7 @@
 	// if it is followed by a non-space character.
 	//
 	// The last rule is more restrictive than the specification requires.
-	// [Go] Make this logic more reasonable.
+	// [Go] TODO Make this logic more reasonable.
 	//switch parser.buffer[parser.buffer_pos] {
 	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
 	//}
@@ -837,29 +880,30 @@
 		"found character that cannot start any token")
 }
 
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
-	// Check for a potential simple key for each flow level.
-	for i := range parser.simple_keys {
-		simple_key := &parser.simple_keys[i]
-
-		// The specification requires that a simple key
-		//
-		//  - is limited to a single line,
-		//  - is shorter than 1024 characters.
-		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
-			// Check if the potential simple key to be removed is required.
-			if simple_key.required {
-				return yaml_parser_set_scanner_error(parser,
-					"while scanning a simple key", simple_key.mark,
-					"could not find expected ':'")
-			}
-			simple_key.possible = false
-		}
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+	if !simple_key.possible {
+		return false, true
 	}
-	return true
+
+	// The 1.2 specification says:
+	//
+	//     "If the ? indicator is omitted, parsing needs to see past the
+	//     implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the “:” indicator must appear at most 1024
+	//     Unicode characters beyond the start of the key. In addition, the key
+	//     is restricted to a single line."
+	//
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
 }
 
 // Check if a simple key may start at the current position and add it if
@@ -879,13 +923,14 @@
 			possible:     true,
 			required:     required,
 			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
 		}
-		simple_key.mark = parser.mark
 
 		if !yaml_parser_remove_simple_key(parser) {
 			return false
 		}
 		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
 	}
 	return true
 }
@@ -900,19 +945,33 @@
 				"while scanning a simple key", parser.simple_keys[i].mark,
 				"could not find expected ':'")
 		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
 	}
-	// Remove the key from the stack.
-	parser.simple_keys[i].possible = false
 	return true
 }
 
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
 // Increase the flow level and resize the simple key list if needed.
 func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
 	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
 
 	// Increase the flow level.
 	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
 	return true
 }
 
@@ -920,11 +979,16 @@
 func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
 	if parser.flow_level > 0 {
 		parser.flow_level--
-		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
 	}
 	return true
 }
 
+// max_indents limits the indents stack size
+const max_indents = 10000
+
 // Push the current indentation level to the stack and set the new level
 // the current column is greater than the indentation level.  In this case,
 // append or insert the specified token into the token queue.
@@ -939,6 +1003,11 @@
 		// indentation level.
 		parser.indents = append(parser.indents, parser.indent)
 		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
 
 		// Create a token and insert it into the queue.
 		token := yaml_token_t{
@@ -957,19 +1026,49 @@
 // Pop indentation levels from the indents stack until the current level
 // becomes less or equal to the column.  For each indentation level, append
 // the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
 	// In the flow context, do nothing.
 	if parser.flow_level > 0 {
 		return true
 	}
 
+	block_mark := scan_mark
+	block_mark.index--
+
 	// Loop through the indentation levels in the stack.
 	for parser.indent > column {
+
+		// [Go] Reposition the end token before potential following
+		//      foot comments of parent blocks. For that, search
+		//      backwards for recent comments that were at the same
+		//      indent as the block that is ending now.
+		stop_index := block_mark.index
+		for i := len(parser.comments) - 1; i >= 0; i-- {
+			comment := &parser.comments[i]
+
+			if comment.end_mark.index < stop_index {
+				// Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+				// If requested indent column is < 0, then the document is over and everything else
+				// is a foot anyway.
+				break
+			}
+			if comment.start_mark.column == parser.indent+1 {
+				// This is a good match. But maybe there's a former comment
+				// at that same indent level, so keep searching.
+				block_mark = comment.start_mark
+			}
+
+			// While the end of the former comment matches with
+			// the start of the following one, we know there's
+			// nothing in between and scanning is still safe.
+			stop_index = comment.scan_mark.index
+		}
+
 		// Create a token and append it to the queue.
 		token := yaml_token_t{
 			typ:        yaml_BLOCK_END_TOKEN,
-			start_mark: parser.mark,
-			end_mark:   parser.mark,
+			start_mark: block_mark,
+			end_mark:   block_mark,
 		}
 		yaml_insert_token(parser, -1, &token)
 
@@ -989,6 +1088,8 @@
 	// Initialize the simple key stack.
 	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
 
+	parser.simple_keys_by_tok = make(map[int]int)
+
 	// A simple key is allowed at the beginning of the stream.
 	parser.simple_key_allowed = true
 
@@ -1016,7 +1117,7 @@
 	}
 
 	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1) {
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
 		return false
 	}
 
@@ -1040,7 +1141,7 @@
 // Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
 func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
 	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1) {
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
 		return false
 	}
 
@@ -1064,7 +1165,7 @@
 // Produce the DOCUMENT-START or DOCUMENT-END token.
 func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
 	// Reset the indentation level.
-	if !yaml_parser_unroll_indent(parser, -1) {
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
 		return false
 	}
 
@@ -1097,6 +1198,7 @@
 
 // Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
 func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
 	// The indicators '[' and '{' may start a simple key.
 	if !yaml_parser_save_simple_key(parser) {
 		return false
@@ -1270,7 +1372,11 @@
 	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
 
 	// Have we found a simple key?
-	if simple_key.possible {
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
 		// Create the KEY token and insert it into the queue.
 		token := yaml_token_t{
 			typ:        yaml_KEY_TOKEN,
@@ -1288,6 +1394,7 @@
 
 		// Remove the simple key.
 		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
 
 		// A simple key cannot follow another simple key.
 		parser.simple_key_allowed = false
@@ -1427,6 +1534,8 @@
 // Eat whitespaces and comments until the next token is found.
 func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
 
+	scan_mark := parser.mark
+
 	// Until the next token is not found.
 	for {
 		// Allow the BOM mark to start a line.
@@ -1453,13 +1562,33 @@
 			}
 		}
 
+		// Check if we just had a line comment under a sequence entry that
+		// looks more like a header to the following content. Similar to this:
+		//
+		// - # The comment
+		//   - Some data
+		//
+		// If so, transform the line comment to a head comment and reposition.
+		if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+			tokenA := parser.tokens[len(parser.tokens)-2]
+			tokenB := parser.tokens[len(parser.tokens)-1]
+			comment := &parser.comments[len(parser.comments)-1]
+			if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+				// If it was in the prior line, reposition so it becomes a
+				// header of the follow up token. Otherwise, keep it in place
+				// so it becomes a header of the former.
+				comment.head = comment.line
+				comment.line = nil
+				if comment.start_mark.line == parser.mark.line-1 {
+					comment.token_mark = parser.mark
+				}
+			}
+		}
+
 		// Eat a comment until a line break.
 		if parser.buffer[parser.buffer_pos] == '#' {
-			for !is_breakz(parser.buffer, parser.buffer_pos) {
-				skip(parser)
-				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
-					return false
-				}
+			if !yaml_parser_scan_comments(parser, scan_mark) {
+				return false
 			}
 		}
 
@@ -1557,6 +1686,10 @@
 	}
 
 	if parser.buffer[parser.buffer_pos] == '#' {
+		// [Go] Discard this inline comment for the time being.
+		//if !yaml_parser_scan_line_comment(parser, start_mark) {
+		//	return false
+		//}
 		for !is_breakz(parser.buffer, parser.buffer_pos) {
 			skip(parser)
 			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
@@ -1972,7 +2105,7 @@
 	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
 	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
 	//      '%'.
-	// [Go] Convert this into more reasonable logic.
+	// [Go] TODO Convert this into more reasonable logic.
 	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
 		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
 		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
@@ -2127,6 +2260,9 @@
 		}
 	}
 	if parser.buffer[parser.buffer_pos] == '#' {
+		if !yaml_parser_scan_line_comment(parser, start_mark) {
+			return false
+		}
 		for !is_breakz(parser.buffer, parser.buffer_pos) {
 			skip(parser)
 			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
@@ -2694,3 +2830,209 @@
 	}
 	return true
 }
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+	if parser.newlines > 0 {
+		return true
+	}
+
+	var start_mark yaml_mark_t
+	var text []byte
+
+	for peek := 0; peek < 512; peek++ {
+		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+			break
+		}
+		if is_blank(parser.buffer, parser.buffer_pos+peek) {
+			continue
+		}
+		if parser.buffer[parser.buffer_pos+peek] == '#' {
+			seen := parser.mark.index+peek
+			for {
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+				if is_breakz(parser.buffer, parser.buffer_pos) {
+					if parser.mark.index >= seen {
+						break
+					}
+					if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+						return false
+					}
+					skip_line(parser)
+				} else if parser.mark.index >= seen {
+					if len(text) == 0 {
+						start_mark = parser.mark
+					}
+					text = read(parser, text)
+				} else {
+					skip(parser)
+				}
+			}
+		}
+		break
+	}
+	if len(text) > 0 {
+		parser.comments = append(parser.comments, yaml_comment_t{
+			token_mark: token_mark,
+			start_mark: start_mark,
+			line:       text,
+		})
+	}
+	return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+	token := parser.tokens[len(parser.tokens)-1]
+
+	if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+		token = parser.tokens[len(parser.tokens)-2]
+	}
+
+	var token_mark = token.start_mark
+	var start_mark yaml_mark_t
+	var next_indent = parser.indent
+	if next_indent < 0 {
+		next_indent = 0
+	}
+
+	var recent_empty = false
+	var first_empty = parser.newlines <= 1
+
+	var line = parser.mark.line
+	var column = parser.mark.column
+
+	var text []byte
+
+	// The foot line is the place where a comment must start to
+	// still be considered as a foot of the prior content.
+	// If there's some content in the currently parsed line, then
+	// the foot is the line below it.
+	var foot_line = -1
+	if scan_mark.line > 0 {
+		foot_line = parser.mark.line-parser.newlines+1
+		if parser.newlines == 0 && parser.mark.column > 1 {
+			foot_line++
+		}
+	}
+
+	var peek = 0
+	for ; peek < 512; peek++ {
+		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+			break
+		}
+		column++
+		if is_blank(parser.buffer, parser.buffer_pos+peek) {
+			continue
+		}
+		c := parser.buffer[parser.buffer_pos+peek]
+		var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+		if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+			// Got line break or terminator.
+			if close_flow || !recent_empty {
+				if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+					// This is the first empty line and there were no empty lines before,
+					// so this initial part of the comment is a foot of the prior token
+					// instead of being a head for the following one. Split it up.
+					// Alternatively, this might also be the last comment inside a flow
+					// scope, so it must be a footer.
+					if len(text) > 0 {
+						if start_mark.column-1 < next_indent {
+							// If dedented it's unrelated to the prior token.
+							token_mark = start_mark
+						}
+						parser.comments = append(parser.comments, yaml_comment_t{
+							scan_mark:  scan_mark,
+							token_mark: token_mark,
+							start_mark: start_mark,
+							end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
+							foot:       text,
+						})
+						scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+						token_mark = scan_mark
+						text = nil
+					}
+				} else {
+					if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+						text = append(text, '\n')
+					}
+				}
+			}
+			if !is_break(parser.buffer, parser.buffer_pos+peek) {
+				break
+			}
+			first_empty = false
+			recent_empty = true
+			column = 0
+			line++
+			continue
+		}
+
+		if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+			// The comment at the different indentation is a foot of the
+			// preceding data rather than a head of the upcoming one.
+			parser.comments = append(parser.comments, yaml_comment_t{
+				scan_mark:  scan_mark,
+				token_mark: token_mark,
+				start_mark: start_mark,
+				end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
+				foot:       text,
+			})
+			scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+			token_mark = scan_mark
+			text = nil
+		}
+
+		if parser.buffer[parser.buffer_pos+peek] != '#' {
+			break
+		}
+
+		if len(text) == 0 {
+			start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+		} else {
+			text = append(text, '\n')
+		}
+
+		recent_empty = false
+
+		// Consume the rest of the comment line, including its line break.
+		seen := parser.mark.index+peek
+		for {
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+			if is_breakz(parser.buffer, parser.buffer_pos) {
+				if parser.mark.index >= seen {
+					break
+				}
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+				skip_line(parser)
+			} else if parser.mark.index >= seen {
+				text = read(parser, text)
+			} else {
+				skip(parser)
+			}
+		}
+
+		peek = 0
+		column = 0
+		line = parser.mark.line
+		next_indent = parser.indent
+		if next_indent < 0 {
+			next_indent = 0
+		}
+	}
+
+	if len(text) > 0 {
+		parser.comments = append(parser.comments, yaml_comment_t{
+			scan_mark:  scan_mark,
+			token_mark: start_mark,
+			start_mark: start_mark,
+			end_mark:   yaml_mark_t{parser.mark.index + peek - 1, line, column},
+			head:       text,
+		})
+	}
+	return true
+}
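
The two scanners above fold comment text into yaml_comment_t records and classify each run as a head, line, or foot comment relative to the surrounding tokens. On the public yaml.v3 API this surfaces as the HeadComment, LineComment, and FootComment fields of yaml.Node. A minimal sketch of reading those fields, assuming a small illustrative document (exactly which node a head comment attaches to — the document node or the first key — depends on blank lines and indentation):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("# colour of the widget\ncolour: blue # keep in sync with the UI\n")

	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}

	// doc is the DocumentNode; Content[0] is the top-level mapping,
	// whose Content slice alternates key and value nodes.
	key := doc.Content[0].Content[0]
	fmt.Printf("doc head: %q\n", doc.HeadComment)
	fmt.Printf("key head: %q\n", key.HeadComment)
	fmt.Printf("key line: %q\n", key.LineComment)
}
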
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
similarity index 76%
rename from vendor/gopkg.in/yaml.v2/sorter.go
rename to vendor/gopkg.in/yaml.v3/sorter.go
index 4c45e66..9210ece 100644
--- a/vendor/gopkg.in/yaml.v2/sorter.go
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -1,3 +1,18 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package yaml
 
 import (
@@ -37,8 +52,10 @@
 		return ak < bk
 	}
 	ar, br := []rune(a.String()), []rune(b.String())
+	digits := false
 	for i := 0; i < len(ar) && i < len(br); i++ {
 		if ar[i] == br[i] {
+			digits = unicode.IsDigit(ar[i])
 			continue
 		}
 		al := unicode.IsLetter(ar[i])
@@ -47,12 +64,16 @@
 			return ar[i] < br[i]
 		}
 		if al || bl {
-			return bl
+			if digits {
+				return al
+			} else {
+				return bl
+			}
 		}
 		var ai, bi int
 		var an, bn int64
 		if ar[i] == '0' || br[i] == '0' {
-			for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
 				if ar[j] != '0' {
 					an = 1
 					bn = 1
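
For reference, sorter.go orders map keys when Go maps are marshalled, comparing embedded numbers by value rather than character by character; the hunk above additionally remembers whether the last matching runes were digits so that a digit/letter difference right after a shared numeric prefix is resolved consistently. A small sketch of the visible effect of the numeric-aware ordering (key names are illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	m := map[string]int{
		"item10": 3,
		"item2":  2,
		"item1":  1,
	}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	// Keys with embedded numbers sort numerically, so "item2"
	// appears before "item10" rather than lexicographically after it.
	fmt.Print(string(out))
}
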
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 0000000..b8a116b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
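
yaml_emitter_flush hands the emitter's internal buffer to the write handler installed by the public Encoder for its io.Writer, so output can sit in that buffer until it fills or the stream is finished. A minimal sketch, assuming a bytes.Buffer as the destination, showing why Close matters:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)

	if err := enc.Encode(map[string]string{"greeting": "hello"}); err != nil {
		panic(err)
	}
	// Close finishes the stream, which drives the final flush of the
	// buffered bytes into the io.Writer.
	if err := enc.Close(); err != nil {
		panic(err)
	}

	fmt.Print(buf.String()) // greeting: hello
}
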
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 0000000..8cec6da
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,698 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
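A Decoder is typically used in a loop over a multi-document stream, and KnownFields(true) turns unexpected mapping keys into decode errors instead of silently dropping them. A minimal sketch, with the stream and the Item type invented for illustration:

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

type Item struct {
	Name  string `yaml:"name"`
	Count int    `yaml:"count"`
}

func main() {
	stream := "name: a\ncount: 1\n---\nname: b\ncount: 2\n"

	dec := yaml.NewDecoder(strings.NewReader(stream))
	dec.KnownFields(true) // unknown keys become decode errors

	for {
		var it Item
		if err := dec.Decode(&it); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", it)
	}
}
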
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
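Node.Decode makes it possible to inspect a document generically first and only then decode a chosen subtree into a typed value. A short sketch, assuming the illustrative document layout below:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("kind: point\nspec:\n  x: 1\n  y: 2\n")

	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}

	// Walk the top-level mapping (Content alternates key, value) and
	// decode only the "spec" value node into a concrete type.
	root := doc.Content[0]
	for i := 0; i < len(root.Content); i += 2 {
		if root.Content[i].Value == "spec" {
			var spec struct{ X, Y int }
			if err := root.Content[i+1].Decode(&spec); err != nil {
				panic(err)
			}
			fmt.Printf("%+v\n", spec)
		}
	}
}
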
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
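Beyond the basic example above, the flow, inline, and omitempty flags change how individual fields are rendered. A hedged sketch with invented type names:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Config struct {
	Meta  `yaml:",inline"`          // fields of Meta are rendered as if declared in Config
	Ports []int  `yaml:"ports,flow"` // rendered in flow style: [80, 443]
	Note  string `yaml:"note,omitempty"`
}

func main() {
	out, err := yaml.Marshal(Config{
		Meta:  Meta{Owner: "ops"},
		Ports: []int{80, 443},
	})
	if err != nil {
		panic(err)
	}
	// Expected shape: "owner: ops" and "ports: [80, 443]";
	// "note" is dropped because it is empty and tagged omitempty.
	fmt.Print(string(out))
}
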
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// by a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
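Encode appends each value to the same stream as its own document; as the comment above notes, the second and later documents are preceded by a "---" separator. A small sketch, also showing SetIndent:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2) // two-space indentation for nested content

	for _, doc := range []map[string]int{{"a": 1}, {"b": 2}} {
		if err := enc.Encode(doc); err != nil {
			panic(err)
		}
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Expected shape:
	// a: 1
	// ---
	// b: 2
	fmt.Print(buf.String())
}
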
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//             Name    string
+//             Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+// 
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind  Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line on which the node appears.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+			if n.IsZero() {
+				return nullTag
+			}
+		}
+		return ""
+	}
+	return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+	return n.Kind == ScalarNode &&
+		(shortTag(n.Tag) == strTag ||
+			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+	n.Kind = ScalarNode
+	if utf8.ValidString(s) {
+		n.Value = s
+		n.Tag = strTag
+	} else {
+		n.Value = encodeBase64(s)
+		n.Tag = binaryTag
+	}
+	if strings.Contains(n.Value, "\n") {
+		n.Style = LiteralStyle
+	}
+}
+
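SetString chooses between strTag and binaryTag and switches the node to LiteralStyle when the value contains a newline, and a node prepared this way can be marshalled directly. A minimal sketch:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var n yaml.Node
	n.SetString("first line\nsecond line")

	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	// The embedded newline selects LiteralStyle, so the scalar is
	// emitted as a block scalar ("|-" form) rather than a quoted string.
	fmt.Print(string(out))
}
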
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+
+	// InlineUnmarshalers holds indexes to inlined fields that
+	// contain unmarshaler values.
+	InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+	var v Unmarshaler
+	unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	inlineUnmarshalers := [][]int(nil)
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct, reflect.Ptr:
+				ftype := field.Type
+				for ftype.Kind() == reflect.Ptr {
+					ftype = ftype.Elem()
+				}
+				if ftype.Kind() != reflect.Struct {
+					return nil, errors.New("option ,inline may only be used on a struct or map field")
+				}
+				if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+					inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+				} else {
+					sinfo, err := getStructInfo(ftype)
+					if err != nil {
+						return nil, err
+					}
+					for _, index := range sinfo.InlineUnmarshalers {
+						inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+					}
+					for _, finfo := range sinfo.FieldsList {
+						if _, found := fieldsMap[finfo.Key]; found {
+							msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+							return nil, errors.New(msg)
+						}
+						if finfo.Inline == nil {
+							finfo.Inline = []int{i, finfo.Num}
+						} else {
+							finfo.Inline = append([]int{i}, finfo.Inline...)
+						}
+						finfo.Id = len(fieldsList)
+						fieldsMap[finfo.Key] = finfo
+						fieldsList = append(fieldsList, finfo)
+					}
+				}
+			default:
+				return nil, errors.New("option ,inline may only be used on a struct or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:          fieldsMap,
+		FieldsList:         fieldsList,
+		InlineMap:          inlineMap,
+		InlineUnmarshalers: inlineUnmarshalers,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
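getStructInfo records at most one ",inline" map per struct (InlineMap); when decoding, that map collects every key not claimed by another field. A short sketch with an invented Service type:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Service struct {
	Name  string                 `yaml:"name"`
	Extra map[string]interface{} `yaml:",inline"` // catches all other keys
}

func main() {
	src := []byte("name: api\nreplicas: 3\ntier: backend\n")

	var s Service
	if err := yaml.Unmarshal(src, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Name)  // api
	fmt.Println(s.Extra) // map[replicas:3 tier:backend]
}
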
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
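
isZero consults the IsZeroer interface before falling back to the kind-based checks, which is what lets types such as time.Time be dropped under omitempty. A minimal sketch with an invented Event type:

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3"
)

type Event struct {
	Name     string    `yaml:"name"`
	Finished time.Time `yaml:"finished,omitempty"` // time.Time implements IsZero
}

func main() {
	out, err := yaml.Marshal(Event{Name: "deploy"})
	if err != nil {
		panic(err)
	}
	// "finished" is omitted because the zero time.Time reports IsZero() == true,
	// which the isZero helper above honours.
	fmt.Print(string(out)) // name: deploy
}
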
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
similarity index 88%
rename from vendor/gopkg.in/yaml.v2/yamlh.go
rename to vendor/gopkg.in/yaml.v3/yamlh.go
index e25cee5..7c6d007 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -1,3 +1,25 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 import (
@@ -73,13 +95,13 @@
 // Scalar styles.
 const (
 	// Let the emitter choose the style.
-	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
 
-	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
-	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
-	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
-	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
-	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
 )
 
 type yaml_sequence_style_t yaml_style_t
@@ -238,6 +260,7 @@
 	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
 	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
 	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+	yaml_TAIL_COMMENT_EVENT
 )
 
 var eventStrings = []string{
@@ -252,6 +275,7 @@
 	yaml_SEQUENCE_END_EVENT:   "sequence end",
 	yaml_MAPPING_START_EVENT:  "mapping start",
 	yaml_MAPPING_END_EVENT:    "mapping end",
+	yaml_TAIL_COMMENT_EVENT:   "tail comment",
 }
 
 func (e yaml_event_type_t) String() string {
@@ -279,6 +303,12 @@
 	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
 	tag_directives []yaml_tag_directive_t
 
+	// The comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
 	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
 	anchor []byte
 
@@ -554,6 +584,8 @@
 
 	unread int // The number of unread characters in the buffer.
 
+	newlines int // The number of line breaks since last non-break/non-blank character
+
 	raw_buffer     []byte // The raw buffer.
 	raw_buffer_pos int    // The current position of the buffer.
 
@@ -562,6 +594,17 @@
 	offset int         // The offset of the current position (in bytes).
 	mark   yaml_mark_t // The mark of the current position.
 
+	// Comments
+
+	head_comment []byte // The current head comments
+	line_comment []byte // The current line comments
+	foot_comment []byte // The current foot comments
+	tail_comment []byte // Foot comment that happens at the end of a block.
+	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)
+
+	comments      []yaml_comment_t // The folded comments for all parsed tokens
+	comments_head int
+
 	// Scanner stuff
 
 	stream_start_produced bool // Have we started to scan the input stream?
@@ -579,6 +622,7 @@
 
 	simple_key_allowed bool                // May a simple key occur at the current position?
 	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
 
 	// Parser stuff
 
@@ -594,6 +638,18 @@
 	document *yaml_document_t // The currently parsed document.
 }
 
+type yaml_comment_t struct {
+
+	scan_mark  yaml_mark_t // Position where scanning for comments started
+	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+	start_mark yaml_mark_t // Position of '#' comment mark
+	end_mark   yaml_mark_t // Position where comment terminated
+
+	head []byte
+	line []byte
+	foot []byte
+}
+
 // Emitter Definitions
 
 // The prototype of a write handler.
@@ -624,8 +680,10 @@
 	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
 	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
 	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
 	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
 	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
 	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
 	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
 	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
@@ -697,6 +755,9 @@
 	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
 	open_ended bool // If an explicit document end is required?
 
+	space_above bool // Is there an empty line above?
+	foot_indent int  // The indent used to write the foot comment above, or -1 if none.
+
 	// Anchor analysis.
 	anchor_data struct {
 		anchor []byte // The anchor value.
@@ -720,6 +781,14 @@
 		style                 yaml_scalar_style_t // The output style.
 	}
 
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	key_line_comment []byte
+
 	// Dumper stuff
 
 	opened bool // If the stream was already opened?
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
similarity index 78%
rename from vendor/gopkg.in/yaml.v2/yamlprivateh.go
rename to vendor/gopkg.in/yaml.v3/yamlprivateh.go
index 8110ce3..e88f9c5 100644
--- a/vendor/gopkg.in/yaml.v2/yamlprivateh.go
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -1,3 +1,25 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
 package yaml
 
 const (
@@ -114,8 +136,9 @@
 // Check if the character is a line break or NUL.
 func is_breakz(b []byte, i int) bool {
 	//return is_break(b, i) || is_z(b, i)
-	return (        // is_break:
-	b[i] == '\r' || // CR (#xD)
+	return (
+		// is_break:
+		b[i] == '\r' || // CR (#xD)
 		b[i] == '\n' || // LF (#xA)
 		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
 		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
@@ -127,8 +150,9 @@
 // Check if the character is a line break, space, or NUL.
 func is_spacez(b []byte, i int) bool {
 	//return is_space(b, i) || is_breakz(b, i)
-	return ( // is_space:
-	b[i] == ' ' ||
+	return (
+		// is_space:
+		b[i] == ' ' ||
 		// is_breakz:
 		b[i] == '\r' || // CR (#xD)
 		b[i] == '\n' || // LF (#xA)
@@ -141,8 +165,9 @@
 // Check if the character is a line break, space, tab, or NUL.
 func is_blankz(b []byte, i int) bool {
 	//return is_blank(b, i) || is_breakz(b, i)
-	return ( // is_blank:
-	b[i] == ' ' || b[i] == '\t' ||
+	return (
+		// is_blank:
+		b[i] == ' ' || b[i] == '\t' ||
 		// is_breakz:
 		b[i] == '\r' || // CR (#xD)
 		b[i] == '\n' || // LF (#xA)