[VOL-4291] Rw-core updates for gRPC migration

Change-Id: I8d5a554409115b29318089671ca4e1ab3fa98810
diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore b/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore
deleted file mode 100644
index a1338d6..0000000
--- a/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore
+++ /dev/null
@@ -1,14 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE b/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/README.md b/vendor/gopkg.in/jcmturner/aescts.v1/README.md
deleted file mode 100644
index d1fddf3..0000000
--- a/vendor/gopkg.in/jcmturner/aescts.v1/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# AES CBC Ciphertext Stealing
-[![GoDoc](https://godoc.org/gopkg.in/jcmturner/aescts.v1?status.svg)](https://godoc.org/gopkg.in/jcmturner/aescts.v1) [![Go Report Card](https://goreportcard.com/badge/gopkg.in/jcmturner/aescts.v1)](https://goreportcard.com/report/gopkg.in/jcmturner/aescts.v1)
-
-Encrypt and decrypt data using AES CBC Ciphertext stealing mode.
-
-Reference: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing
-
-To get the package, execute:
-```
-go get gopkg.in/jcmturner/aescts.v1
-```
-To import this package, add the following line to your code:
-```go
-import "gopkg.in/jcmturner/aescts.v1"
-
-```
\ No newline at end of file
diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go b/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go
deleted file mode 100644
index 278713e..0000000
--- a/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Package aescts provides AES CBC CipherText Stealing encryption and decryption methods
-package aescts
-
-import (
-	"crypto/aes"
-	"crypto/cipher"
-	"errors"
-	"fmt"
-)
-
-// Encrypt the message with the key and the initial vector.
-// Returns: next iv, ciphertext bytes, error
-func Encrypt(key, iv, plaintext []byte) ([]byte, []byte, error) {
-	l := len(plaintext)
-
-	block, err := aes.NewCipher(key)
-	if err != nil {
-		return []byte{}, []byte{}, fmt.Errorf("Error creating cipher: %v", err)
-	}
-	mode := cipher.NewCBCEncrypter(block, iv)
-
-	m := make([]byte, len(plaintext))
-	copy(m, plaintext)
-
-	/*For consistency, ciphertext stealing is always used for the last two
-	blocks of the data to be encrypted, as in [RC5].  If the data length
-	is a multiple of the block size, this is equivalent to plain CBC mode
-	with the last two ciphertext blocks swapped.*/
-	/*The initial vector carried out from one encryption for use in a
-	subsequent encryption is the next-to-last block of the encryption
-	output; this is the encrypted form of the last plaintext block.*/
-	if l <= aes.BlockSize {
-		m, _ = zeroPad(m, aes.BlockSize)
-		mode.CryptBlocks(m, m)
-		return m, m, nil
-	}
-	if l%aes.BlockSize == 0 {
-		mode.CryptBlocks(m, m)
-		iv = m[len(m)-aes.BlockSize:]
-		rb, _ := swapLastTwoBlocks(m, aes.BlockSize)
-		return iv, rb, nil
-	}
-	m, _ = zeroPad(m, aes.BlockSize)
-	rb, pb, lb, err := tailBlocks(m, aes.BlockSize)
-	if err != nil {
-		return []byte{}, []byte{}, fmt.Errorf("Error tailing blocks: %v", err)
-	}
-	var ct []byte
-	if rb != nil {
-		// Encrpt all but the lats 2 blocks and update the rolling iv
-		mode.CryptBlocks(rb, rb)
-		iv = rb[len(rb)-aes.BlockSize:]
-		mode = cipher.NewCBCEncrypter(block, iv)
-		ct = append(ct, rb...)
-	}
-	mode.CryptBlocks(pb, pb)
-	mode = cipher.NewCBCEncrypter(block, pb)
-	mode.CryptBlocks(lb, lb)
-	// Cipher Text Stealing (CTS) - Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing
-	// Swap the last two cipher blocks
-	// Truncate the ciphertext to the length of the original plaintext
-	ct = append(ct, lb...)
-	ct = append(ct, pb...)
-	return lb, ct[:l], nil
-}
-
-// Decrypt the ciphertext with the key and the initial vector.
-func Decrypt(key, iv, ciphertext []byte) ([]byte, error) {
-	// Copy the cipher text as golang slices even when passed by value to this method can result in the backing arrays of the calling code value being updated.
-	ct := make([]byte, len(ciphertext))
-	copy(ct, ciphertext)
-	if len(ct) < aes.BlockSize {
-		return []byte{}, fmt.Errorf("Ciphertext is not large enough. It is less that one block size. Blocksize:%v; Ciphertext:%v", aes.BlockSize, len(ct))
-	}
-	// Configure the CBC
-	block, err := aes.NewCipher(key)
-	if err != nil {
-		return nil, fmt.Errorf("Error creating cipher: %v", err)
-	}
-	var mode cipher.BlockMode
-
-	//If ciphertext is multiple of blocksize we just need to swap back the last two blocks and then do CBC
-	//If the ciphertext is just one block we can't swap so we just decrypt
-	if len(ct)%aes.BlockSize == 0 {
-		if len(ct) > aes.BlockSize {
-			ct, _ = swapLastTwoBlocks(ct, aes.BlockSize)
-		}
-		mode = cipher.NewCBCDecrypter(block, iv)
-		message := make([]byte, len(ct))
-		mode.CryptBlocks(message, ct)
-		return message[:len(ct)], nil
-	}
-
-	// Cipher Text Stealing (CTS) using CBC interface. Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing
-	// Get ciphertext of the 2nd to last (penultimate) block (cpb), the last block (clb) and the rest (crb)
-	crb, cpb, clb, _ := tailBlocks(ct, aes.BlockSize)
-	v := make([]byte, len(iv), len(iv))
-	copy(v, iv)
-	var message []byte
-	if crb != nil {
-		//If there is more than just the last and the penultimate block we decrypt it and the last bloc of this becomes the iv for later
-		rb := make([]byte, len(crb))
-		mode = cipher.NewCBCDecrypter(block, v)
-		v = crb[len(crb)-aes.BlockSize:]
-		mode.CryptBlocks(rb, crb)
-		message = append(message, rb...)
-	}
-
-	// We need to modify the cipher text
-	// Decryt the 2nd to last (penultimate) block with a the original iv
-	pb := make([]byte, aes.BlockSize)
-	mode = cipher.NewCBCDecrypter(block, iv)
-	mode.CryptBlocks(pb, cpb)
-	// number of byte needed to pad
-	npb := aes.BlockSize - len(ct)%aes.BlockSize
-	//pad last block using the number of bytes needed from the tail of the plaintext 2nd to last (penultimate) block
-	clb = append(clb, pb[len(pb)-npb:]...)
-
-	// Now decrypt the last block in the penultimate position (iv will be from the crb, if the is no crb it's zeros)
-	// iv for the penultimate block decrypted in the last position becomes the modified last block
-	lb := make([]byte, aes.BlockSize)
-	mode = cipher.NewCBCDecrypter(block, v)
-	v = clb
-	mode.CryptBlocks(lb, clb)
-	message = append(message, lb...)
-
-	// Now decrypt the penultimate block in the last position (iv will be from the modified last block)
-	mode = cipher.NewCBCDecrypter(block, v)
-	mode.CryptBlocks(cpb, cpb)
-	message = append(message, cpb...)
-
-	// Truncate to the size of the original cipher text
-	return message[:len(ct)], nil
-}
-
-func tailBlocks(b []byte, c int) ([]byte, []byte, []byte, error) {
-	if len(b) <= c {
-		return []byte{}, []byte{}, []byte{}, errors.New("bytes slice is not larger than one block so cannot tail")
-	}
-	// Get size of last block
-	var lbs int
-	if l := len(b) % aes.BlockSize; l == 0 {
-		lbs = aes.BlockSize
-	} else {
-		lbs = l
-	}
-	// Get last block
-	lb := b[len(b)-lbs:]
-	// Get 2nd to last (penultimate) block
-	pb := b[len(b)-lbs-c : len(b)-lbs]
-	if len(b) > 2*c {
-		rb := b[:len(b)-lbs-c]
-		return rb, pb, lb, nil
-	}
-	return nil, pb, lb, nil
-}
-
-func swapLastTwoBlocks(b []byte, c int) ([]byte, error) {
-	rb, pb, lb, err := tailBlocks(b, c)
-	if err != nil {
-		return nil, err
-	}
-	var out []byte
-	if rb != nil {
-		out = append(out, rb...)
-	}
-	out = append(out, lb...)
-	out = append(out, pb...)
-	return out, nil
-}
-
-// zeroPad pads bytes with zeros to nearest multiple of message size m.
-func zeroPad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("Invalid message block size when padding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("Data not valid to pad: Zero size")
-	}
-	if l := len(b) % m; l != 0 {
-		n := m - l
-		z := make([]byte, n)
-		b = append(b, z...)
-	}
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore b/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore
deleted file mode 100644
index a1338d6..0000000
--- a/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore
+++ /dev/null
@@ -1,14 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
-.glide/
diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml b/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml
deleted file mode 100644
index cab4f7b..0000000
--- a/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-go:
-  - 1.7.x
-  - 1.8.x
-  - 1.9.x
-  - master
-
-gobuild_args: -tags=integration -race
-
-sudo: required
-
-services:
-  - docker
-
-before_install:
-  - docker pull jcmturner/gokrb5:dns
-  - docker run -d -h kdc.test.gokrb5 -v /etc/localtime:/etc/localtime:ro -e "TEST_KDC_ADDR=127.0.0.1" -p 53:53 -p 53:53/udp --name dns jcmturner/gokrb5:dns
-
-before_script:
-  - sudo sed -i 's/nameserver .*/nameserver 127.0.0.1/g' /etc/resolv.conf
-
-env:
-  - DNSUTILS_OVERRIDE_NS="127.0.0.1:53"
\ No newline at end of file
diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE b/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go b/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go
deleted file mode 100644
index 15ea912..0000000
--- a/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package dnsutils
-
-import (
-	"math/rand"
-	"net"
-	"sort"
-)
-
-// OrderedSRV returns a count of the results and a map keyed on the order they should be used.
-// This based on the records' priority and randomised selection based on their relative weighting.
-// The function's inputs are the same as those for net.LookupSRV
-// To use in the correct order:
-//
-// count, orderedSRV, err := OrderedSRV(service, proto, name)
-// i := 1
-// for  i <= count {
-//   srv := orderedSRV[i]
-//   // Do something such as dial this SRV. If fails move on the the next or break if it succeeds.
-//   i += 1
-// }
-func OrderedSRV(service, proto, name string) (int, map[int]*net.SRV, error) {
-	_, addrs, err := net.LookupSRV(service, proto, name)
-	if err != nil {
-		return 0, make(map[int]*net.SRV), err
-	}
-	index, osrv := orderSRV(addrs)
-	return index, osrv, nil
-}
-
-func orderSRV(addrs []*net.SRV) (int, map[int]*net.SRV) {
-	// Initialise the ordered map
-	var o int
-	osrv := make(map[int]*net.SRV)
-
-	prioMap := make(map[int][]*net.SRV, 0)
-	for _, srv := range addrs {
-		prioMap[int(srv.Priority)] = append(prioMap[int(srv.Priority)], srv)
-	}
-
-	priorities := make([]int, 0)
-	for p := range prioMap {
-		priorities = append(priorities, p)
-	}
-
-	var count int
-	sort.Ints(priorities)
-	for _, p := range priorities {
-		tos := weightedOrder(prioMap[p])
-		for i, s := range tos {
-			count += 1
-			osrv[o+i] = s
-		}
-		o += len(tos)
-	}
-	return count, osrv
-}
-
-func weightedOrder(srvs []*net.SRV) map[int]*net.SRV {
-	// Get the total weight
-	var tw int
-	for _, s := range srvs {
-		tw += int(s.Weight)
-	}
-
-	// Initialise the ordered map
-	o := 1
-	osrv := make(map[int]*net.SRV)
-
-	// Whilst there are still entries to be ordered
-	l := len(srvs)
-	for l > 0 {
-		i := rand.Intn(l)
-		s := srvs[i]
-		var rw int
-		if tw > 0 {
-			// Greater the weight the more likely this will be zero or less
-			rw = rand.Intn(tw) - int(s.Weight)
-		}
-		if rw <= 0 {
-			// Put entry in position
-			osrv[o] = s
-			if len(srvs) > 1 {
-				// Remove the entry from the source slice by swapping with the last entry and truncating
-				srvs[len(srvs)-1], srvs[i] = srvs[i], srvs[len(srvs)-1]
-				srvs = srvs[:len(srvs)-1]
-				l = len(srvs)
-			} else {
-				l = 0
-			}
-			o += 1
-			tw = tw - int(s.Weight)
-		}
-	}
-	return osrv
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE b/vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go
deleted file mode 100644
index f27740b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/asn1tools/tools.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Package asn1tools provides tools for managing ASN1 marshaled data.
-package asn1tools
-
-import (
-	"github.com/jcmturner/gofork/encoding/asn1"
-)
-
-// MarshalLengthBytes returns the ASN1 encoded bytes for the length 'l'
-//
-// There are two forms: short (for lengths between 0 and 127), and long definite (for lengths between 0 and 2^1008 -1).
-//
-// Short form: One octet. Bit 8 has value "0" and bits 7-1 give the length.
-//
-// Long form: Two to 127 octets. Bit 8 of first octet has value "1" and bits 7-1 give the number of additional length octets. Second and following octets give the length, base 256, most significant digit first.
-func MarshalLengthBytes(l int) []byte {
-	if l <= 127 {
-		return []byte{byte(l)}
-	}
-	var b []byte
-	p := 1
-	for i := 1; i < 127; {
-		b = append([]byte{byte((l % (p * 256)) / p)}, b...)
-		p = p * 256
-		l = l - l%p
-		if l <= 0 {
-			break
-		}
-	}
-	return append([]byte{byte(128 + len(b))}, b...)
-}
-
-// GetLengthFromASN returns the length of a slice of ASN1 encoded bytes from the ASN1 length header it contains.
-func GetLengthFromASN(b []byte) int {
-	if int(b[1]) <= 127 {
-		return int(b[1])
-	}
-	// The bytes that indicate the length
-	lb := b[2 : 2+int(b[1])-128]
-	base := 1
-	l := 0
-	for i := len(lb) - 1; i >= 0; i-- {
-		l += int(lb[i]) * base
-		base = base * 256
-	}
-	return l
-}
-
-// GetNumberBytesInLengthHeader returns the number of bytes in the ASn1 header that indicate the length.
-func GetNumberBytesInLengthHeader(b []byte) int {
-	if int(b[1]) <= 127 {
-		return 1
-	}
-	// The bytes that indicate the length
-	return 1 + int(b[1]) - 128
-}
-
-// AddASNAppTag adds an ASN1 encoding application tag value to the raw bytes provided.
-func AddASNAppTag(b []byte, tag int) []byte {
-	r := asn1.RawValue{
-		Class:      asn1.ClassApplication,
-		IsCompound: true,
-		Tag:        tag,
-		Bytes:      b,
-	}
-	ab, _ := asn1.Marshal(r)
-	return ab
-}
-
-/*
-// The Marshal method of golang's asn1 package does not enable you to define wrapping the output in an application tag.
-// This method adds that wrapping tag.
-func AddASNAppTag(b []byte, tag int) []byte {
-	// The ASN1 wrapping consists of 2 bytes:
-	// 1st byte -> Identifier Octet - Application Tag
-	// 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here.
-	// Application Tag:
-	//| Bit:        | 8                            | 7                          | 6                                         | 5 | 4 | 3 | 2 | 1             |
-	//| Value:      | 0                            | 1                          | 1                                         | From the RFC spec 4120        |
-	//| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value |
-	// Therefore the value of the byte is an integer = ( Application tag value + 96 )
-	//b = append(MarshalLengthBytes(int(b[1])+2), b...)
-	b = append(MarshalLengthBytes(len(b)), b...)
-	b = append([]byte{byte(96 + tag)}, b...)
-	return b
-}
-*/
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go
deleted file mode 100644
index 9d1a2f3..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/ASExchange.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package client
-
-import (
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/errorcode"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/patype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// ASExchange performs an AS exchange for the client to retrieve a TGT.
-func (cl *Client) ASExchange(realm string, ASReq messages.ASReq, referral int) (messages.ASRep, error) {
-	if ok, err := cl.IsConfigured(); !ok {
-		return messages.ASRep{}, krberror.Errorf(err, krberror.ConfigError, "AS Exchange cannot be performed")
-	}
-
-	// Set PAData if required
-	err := setPAData(cl, nil, &ASReq)
-	if err != nil {
-		return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: issue with setting PAData on AS_REQ")
-	}
-
-	b, err := ASReq.Marshal()
-	if err != nil {
-		return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ")
-	}
-	var ASRep messages.ASRep
-
-	rb, err := cl.sendToKDC(b, realm)
-	if err != nil {
-		if e, ok := err.(messages.KRBError); ok {
-			switch e.ErrorCode {
-			case errorcode.KDC_ERR_PREAUTH_REQUIRED, errorcode.KDC_ERR_PREAUTH_FAILED:
-				// From now on assume this client will need to do this pre-auth and set the PAData
-				cl.settings.assumePreAuthentication = true
-				err = setPAData(cl, &e, &ASReq)
-				if err != nil {
-					return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: failed setting AS_REQ PAData for pre-authentication required")
-				}
-				b, err := ASReq.Marshal()
-				if err != nil {
-					return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ with PAData")
-				}
-				rb, err = cl.sendToKDC(b, realm)
-				if err != nil {
-					if _, ok := err.(messages.KRBError); ok {
-						return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC")
-					}
-					return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC")
-				}
-			case errorcode.KDC_ERR_WRONG_REALM:
-				// Client referral https://tools.ietf.org/html/rfc6806.html#section-7
-				if referral > 5 {
-					return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of client referrals exceeded")
-				}
-				referral++
-				return cl.ASExchange(e.CRealm, ASReq, referral)
-			default:
-				return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC")
-			}
-		} else {
-			return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC")
-		}
-	}
-	err = ASRep.Unmarshal(rb)
-	if err != nil {
-		return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed to process the AS_REP")
-	}
-	if ok, err := ASRep.Verify(cl.Config, cl.Credentials, ASReq); !ok {
-		return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: AS_REP is not valid or client password/keytab incorrect")
-	}
-	return ASRep, nil
-}
-
-// setPAData adds pre-authentication data to the AS_REQ.
-func setPAData(cl *Client, krberr *messages.KRBError, ASReq *messages.ASReq) error {
-	if !cl.settings.DisablePAFXFAST() {
-		pa := types.PAData{PADataType: patype.PA_REQ_ENC_PA_REP}
-		ASReq.PAData = append(ASReq.PAData, pa)
-	}
-	if cl.settings.AssumePreAuthentication() {
-		// Identify the etype to use to encrypt the PA Data
-		var et etype.EType
-		var err error
-		var key types.EncryptionKey
-		if krberr == nil {
-			// This is not in response to an error from the KDC. It is preemptive or renewal
-			// There is no KRB Error that tells us the etype to use
-			etn := cl.settings.preAuthEType // Use the etype that may have previously been negotiated
-			if etn == 0 {
-				etn = int32(cl.Config.LibDefaults.PreferredPreauthTypes[0]) // Resort to config
-			}
-			et, err = crypto.GetEtype(etn)
-			if err != nil {
-				return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption")
-			}
-			key, err = cl.Key(et, nil)
-			if err != nil {
-				return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials")
-			}
-		} else {
-			// Get the etype to use from the PA data in the KRBError e-data
-			et, err = preAuthEType(krberr)
-			if err != nil {
-				return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption")
-			}
-			cl.settings.preAuthEType = et.GetETypeID() // Set the etype that has been defined for potential future use
-			key, err = cl.Key(et, krberr)
-			if err != nil {
-				return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials")
-			}
-		}
-		// Generate the PA data
-		paTSb, err := types.GetPAEncTSEncAsnMarshalled()
-		if err != nil {
-			return krberror.Errorf(err, krberror.KRBMsgError, "error creating PAEncTSEnc for Pre-Authentication")
-		}
-		//TODO (theme: KVNO from keytab) the kvno should not be hard coded to 1 as this hampers troubleshooting.
-		paEncTS, err := crypto.GetEncryptedData(paTSb, key, keyusage.AS_REQ_PA_ENC_TIMESTAMP, 1)
-		if err != nil {
-			return krberror.Errorf(err, krberror.EncryptingError, "error encrypting pre-authentication timestamp")
-		}
-		pb, err := paEncTS.Marshal()
-		if err != nil {
-			return krberror.Errorf(err, krberror.EncodingError, "error marshaling the PAEncTSEnc encrypted data")
-		}
-		pa := types.PAData{
-			PADataType:  patype.PA_ENC_TIMESTAMP,
-			PADataValue: pb,
-		}
-		// Look for and delete any exiting patype.PA_ENC_TIMESTAMP
-		for i, pa := range ASReq.PAData {
-			if pa.PADataType == patype.PA_ENC_TIMESTAMP {
-				ASReq.PAData[i] = ASReq.PAData[len(ASReq.PAData)-1]
-				ASReq.PAData = ASReq.PAData[:len(ASReq.PAData)-1]
-			}
-		}
-		ASReq.PAData = append(ASReq.PAData, pa)
-	}
-	return nil
-}
-
-// preAuthEType establishes what encryption type to use for pre-authentication from the KRBError returned from the KDC.
-func preAuthEType(krberr *messages.KRBError) (etype etype.EType, err error) {
-	//The preferred ordering of the "hint" pre-authentication data that
-	//affect client key selection is: ETYPE-INFO2, followed by ETYPE-INFO,
-	//followed by PW-SALT.
-	//A KDC SHOULD NOT send PA-PW-SALT when issuing a KRB-ERROR message
-	//that requests additional pre-authentication.  Implementation note:
-	//Some KDC implementations issue an erroneous PA-PW-SALT when issuing a
-	//KRB-ERROR message that requests additional pre-authentication.
-	//Therefore, clients SHOULD ignore a PA-PW-SALT accompanying a
-	//KRB-ERROR message that requests additional pre-authentication.
-	var etypeID int32
-	var pas types.PADataSequence
-	e := pas.Unmarshal(krberr.EData)
-	if e != nil {
-		err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling KRBError data")
-		return
-	}
-	for _, pa := range pas {
-		switch pa.PADataType {
-		case patype.PA_ETYPE_INFO2:
-			info, e := pa.GetETypeInfo2()
-			if e != nil {
-				err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO2 data")
-				return
-			}
-			etypeID = info[0].EType
-			break
-		case patype.PA_ETYPE_INFO:
-			info, e := pa.GetETypeInfo()
-			if e != nil {
-				err = krberror.Errorf(e, krberror.EncodingError, "error unmashalling ETYPE-INFO data")
-				return
-			}
-			etypeID = info[0].EType
-		}
-	}
-	etype, e = crypto.GetEtype(etypeID)
-	if e != nil {
-		err = krberror.Errorf(e, krberror.EncryptingError, "error creating etype")
-		return
-	}
-	return etype, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go
deleted file mode 100644
index 8ad3e55..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/TGSExchange.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package client
-
-import (
-	"gopkg.in/jcmturner/gokrb5.v7/iana/flags"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// TGSREQGenerateAndExchange generates the TGS_REQ and performs a TGS exchange to retrieve a ticket to the specified SPN.
-func (cl *Client) TGSREQGenerateAndExchange(spn types.PrincipalName, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, renewal bool) (tgsReq messages.TGSReq, tgsRep messages.TGSRep, err error) {
-	tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, spn, renewal)
-	if err != nil {
-		return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: failed to generate a new TGS_REQ")
-	}
-	return cl.TGSExchange(tgsReq, kdcRealm, tgsRep.Ticket, sessionKey, 0)
-}
-
-// TGSExchange exchanges the provided TGS_REQ with the KDC to retrieve a TGS_REP.
-// Referrals are automatically handled.
-// The client's cache is updated with the ticket received.
-func (cl *Client) TGSExchange(tgsReq messages.TGSReq, kdcRealm string, tgt messages.Ticket, sessionKey types.EncryptionKey, referral int) (messages.TGSReq, messages.TGSRep, error) {
-	var tgsRep messages.TGSRep
-	b, err := tgsReq.Marshal()
-	if err != nil {
-		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to marshal TGS_REQ")
-	}
-	r, err := cl.sendToKDC(b, kdcRealm)
-	if err != nil {
-		if _, ok := err.(messages.KRBError); ok {
-			return tgsReq, tgsRep, krberror.Errorf(err, krberror.KDCError, "TGS Exchange Error: kerberos error response from KDC when requesting for %s", tgsReq.ReqBody.SName.PrincipalNameString())
-		}
-		return tgsReq, tgsRep, krberror.Errorf(err, krberror.NetworkingError, "TGS Exchange Error: issue sending TGS_REQ to KDC")
-	}
-	err = tgsRep.Unmarshal(r)
-	if err != nil {
-		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP")
-	}
-	err = tgsRep.DecryptEncPart(sessionKey)
-	if err != nil {
-		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP")
-	}
-	if ok, err := tgsRep.Verify(cl.Config, tgsReq); !ok {
-		return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid")
-	}
-
-	if tgsRep.Ticket.SName.NameString[0] == "krbtgt" && !tgsRep.Ticket.SName.Equal(tgsReq.ReqBody.SName) {
-		if referral > 5 {
-			return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: maximum number of referrals exceeded")
-		}
-		// Server referral https://tools.ietf.org/html/rfc6806.html#section-8
-		// The TGS Rep contains a TGT for another domain as the service resides in that domain.
-		cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart)
-		realm := tgsRep.Ticket.SName.NameString[len(tgsRep.Ticket.SName.NameString)-1]
-		referral++
-		if types.IsFlagSet(&tgsReq.ReqBody.KDCOptions, flags.EncTktInSkey) && len(tgsReq.ReqBody.AdditionalTickets) > 0 {
-			tgsReq, err = messages.NewUser2UserTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal, tgsReq.ReqBody.AdditionalTickets[0])
-			if err != nil {
-				return tgsReq, tgsRep, err
-			}
-		}
-		tgsReq, err = messages.NewTGSReq(cl.Credentials.CName(), kdcRealm, cl.Config, tgt, sessionKey, tgsReq.ReqBody.SName, tgsReq.Renewal)
-		if err != nil {
-			return tgsReq, tgsRep, err
-		}
-		return cl.TGSExchange(tgsReq, realm, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, referral)
-	}
-	cl.cache.addEntry(
-		tgsRep.Ticket,
-		tgsRep.DecryptedEncPart.AuthTime,
-		tgsRep.DecryptedEncPart.StartTime,
-		tgsRep.DecryptedEncPart.EndTime,
-		tgsRep.DecryptedEncPart.RenewTill,
-		tgsRep.DecryptedEncPart.Key,
-	)
-	cl.Log("ticket added to cache for %s (EndTime: %v)", tgsRep.Ticket.SName.PrincipalNameString(), tgsRep.DecryptedEncPart.EndTime)
-	return tgsReq, tgsRep, err
-}
-
-// GetServiceTicket makes a request to get a service ticket for the SPN specified
-// SPN format: <SERVICE>/<FQDN> Eg. HTTP/www.example.com
-// The ticket will be added to the client's ticket cache
-func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) {
-	var tkt messages.Ticket
-	var skey types.EncryptionKey
-	if tkt, skey, ok := cl.GetCachedTicket(spn); ok {
-		// Already a valid ticket in the cache
-		return tkt, skey, nil
-	}
-	princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
-	realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1])
-
-	tgt, skey, err := cl.sessionTGT(realm)
-	if err != nil {
-		return tkt, skey, err
-	}
-	_, tgsRep, err := cl.TGSREQGenerateAndExchange(princ, realm, tgt, skey, false)
-	if err != nil {
-		return tkt, skey, err
-	}
-	return tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go
deleted file mode 100644
index 07b4a01..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/cache.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package client
-
-import (
-	"errors"
-	"sync"
-	"time"
-
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// Cache for service tickets held by the client.
-type Cache struct {
-	Entries map[string]CacheEntry
-	mux     sync.RWMutex
-}
-
-// CacheEntry holds details for a cache entry.
-type CacheEntry struct {
-	Ticket     messages.Ticket
-	AuthTime   time.Time
-	StartTime  time.Time
-	EndTime    time.Time
-	RenewTill  time.Time
-	SessionKey types.EncryptionKey
-}
-
-// NewCache creates a new client ticket cache instance.
-func NewCache() *Cache {
-	return &Cache{
-		Entries: map[string]CacheEntry{},
-	}
-}
-
-// getEntry returns a cache entry that matches the SPN.
-func (c *Cache) getEntry(spn string) (CacheEntry, bool) {
-	c.mux.RLock()
-	defer c.mux.RUnlock()
-	e, ok := (*c).Entries[spn]
-	return e, ok
-}
-
-// addEntry adds a ticket to the cache.
-func (c *Cache) addEntry(tkt messages.Ticket, authTime, startTime, endTime, renewTill time.Time, sessionKey types.EncryptionKey) CacheEntry {
-	spn := tkt.SName.PrincipalNameString()
-	c.mux.Lock()
-	defer c.mux.Unlock()
-	(*c).Entries[spn] = CacheEntry{
-		Ticket:     tkt,
-		AuthTime:   authTime,
-		StartTime:  startTime,
-		EndTime:    endTime,
-		RenewTill:  renewTill,
-		SessionKey: sessionKey,
-	}
-	return c.Entries[spn]
-}
-
-// clear deletes all the cache entries
-func (c *Cache) clear() {
-	c.mux.Lock()
-	defer c.mux.Unlock()
-	for k := range c.Entries {
-		delete(c.Entries, k)
-	}
-}
-
-// RemoveEntry removes the cache entry for the defined SPN.
-func (c *Cache) RemoveEntry(spn string) {
-	c.mux.Lock()
-	defer c.mux.Unlock()
-	delete(c.Entries, spn)
-}
-
-// GetCachedTicket returns a ticket from the cache for the SPN.
-// Only a ticket that is currently valid will be returned.
-func (cl *Client) GetCachedTicket(spn string) (messages.Ticket, types.EncryptionKey, bool) {
-	if e, ok := cl.cache.getEntry(spn); ok {
-		//If within time window of ticket return it
-		if time.Now().UTC().After(e.StartTime) && time.Now().UTC().Before(e.EndTime) {
-			cl.Log("ticket received from cache for %s", spn)
-			return e.Ticket, e.SessionKey, true
-		} else if time.Now().UTC().Before(e.RenewTill) {
-			e, err := cl.renewTicket(e)
-			if err != nil {
-				return e.Ticket, e.SessionKey, false
-			}
-			return e.Ticket, e.SessionKey, true
-		}
-	}
-	var tkt messages.Ticket
-	var key types.EncryptionKey
-	return tkt, key, false
-}
-
-// renewTicket renews a cache entry ticket.
-// To renew from outside the client package use GetCachedTicket
-func (cl *Client) renewTicket(e CacheEntry) (CacheEntry, error) {
-	spn := e.Ticket.SName
-	_, _, err := cl.TGSREQGenerateAndExchange(spn, e.Ticket.Realm, e.Ticket, e.SessionKey, true)
-	if err != nil {
-		return e, err
-	}
-	e, ok := cl.cache.getEntry(e.Ticket.SName.PrincipalNameString())
-	if !ok {
-		return e, errors.New("ticket was not added to cache")
-	}
-	cl.Log("ticket renewed for %s (EndTime: %v)", spn.PrincipalNameString(), e.EndTime)
-	return e, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go
deleted file mode 100644
index 6e4c83c..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/client.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Package client provides a client library and methods for Kerberos 5 authentication.
-package client
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	"gopkg.in/jcmturner/gokrb5.v7/config"
-	"gopkg.in/jcmturner/gokrb5.v7/credentials"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/errorcode"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
-	"gopkg.in/jcmturner/gokrb5.v7/keytab"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// Client side configuration and state.
-type Client struct {
-	Credentials *credentials.Credentials
-	Config      *config.Config
-	settings    *Settings
-	sessions    *sessions
-	cache       *Cache
-}
-
-// NewClientWithPassword creates a new client from a password credential.
-// Set the realm to empty string to use the default realm from config.
-func NewClientWithPassword(username, realm, password string, krb5conf *config.Config, settings ...func(*Settings)) *Client {
-	creds := credentials.New(username, realm)
-	return &Client{
-		Credentials: creds.WithPassword(password),
-		Config:      krb5conf,
-		settings:    NewSettings(settings...),
-		sessions: &sessions{
-			Entries: make(map[string]*session),
-		},
-		cache: NewCache(),
-	}
-}
-
-// NewClientWithKeytab creates a new client from a keytab credential.
-func NewClientWithKeytab(username, realm string, kt *keytab.Keytab, krb5conf *config.Config, settings ...func(*Settings)) *Client {
-	creds := credentials.New(username, realm)
-	return &Client{
-		Credentials: creds.WithKeytab(kt),
-		Config:      krb5conf,
-		settings:    NewSettings(settings...),
-		sessions: &sessions{
-			Entries: make(map[string]*session),
-		},
-		cache: NewCache(),
-	}
-}
-
-// NewClientFromCCache create a client from a populated client cache.
-//
-// WARNING: A client created from CCache does not automatically renew TGTs and a failure will occur after the TGT expires.
-func NewClientFromCCache(c *credentials.CCache, krb5conf *config.Config, settings ...func(*Settings)) (*Client, error) {
-	cl := &Client{
-		Credentials: c.GetClientCredentials(),
-		Config:      krb5conf,
-		settings:    NewSettings(settings...),
-		sessions: &sessions{
-			Entries: make(map[string]*session),
-		},
-		cache: NewCache(),
-	}
-	spn := types.PrincipalName{
-		NameType:   nametype.KRB_NT_SRV_INST,
-		NameString: []string{"krbtgt", c.DefaultPrincipal.Realm},
-	}
-	cred, ok := c.GetEntry(spn)
-	if !ok {
-		return cl, errors.New("TGT not found in CCache")
-	}
-	var tgt messages.Ticket
-	err := tgt.Unmarshal(cred.Ticket)
-	if err != nil {
-		return cl, fmt.Errorf("TGT bytes in cache are not valid: %v", err)
-	}
-	cl.sessions.Entries[c.DefaultPrincipal.Realm] = &session{
-		realm:      c.DefaultPrincipal.Realm,
-		authTime:   cred.AuthTime,
-		endTime:    cred.EndTime,
-		renewTill:  cred.RenewTill,
-		tgt:        tgt,
-		sessionKey: cred.Key,
-	}
-	for _, cred := range c.GetEntries() {
-		var tkt messages.Ticket
-		err = tkt.Unmarshal(cred.Ticket)
-		if err != nil {
-			return cl, fmt.Errorf("cache entry ticket bytes are not valid: %v", err)
-		}
-		cl.cache.addEntry(
-			tkt,
-			cred.AuthTime,
-			cred.StartTime,
-			cred.EndTime,
-			cred.RenewTill,
-			cred.Key,
-		)
-	}
-	return cl, nil
-}
-
-// Key returns the client's encryption key for the specified encryption type.
-// The key can be retrieved either from the keytab or generated from the client's password.
-// If the client has both a keytab and a password defined the keytab is favoured as the source for the key
-// A KRBError can be passed in the event the KDC returns one of type KDC_ERR_PREAUTH_REQUIRED and is required to derive
-// the key for pre-authentication from the client's password. If a KRBError is not available, pass nil to this argument.
-func (cl *Client) Key(etype etype.EType, krberr *messages.KRBError) (types.EncryptionKey, error) {
-	if cl.Credentials.HasKeytab() && etype != nil {
-		return cl.Credentials.Keytab().GetEncryptionKey(cl.Credentials.CName(), cl.Credentials.Domain(), 0, etype.GetETypeID())
-	} else if cl.Credentials.HasPassword() {
-		if krberr != nil && krberr.ErrorCode == errorcode.KDC_ERR_PREAUTH_REQUIRED {
-			var pas types.PADataSequence
-			err := pas.Unmarshal(krberr.EData)
-			if err != nil {
-				return types.EncryptionKey{}, fmt.Errorf("could not get PAData from KRBError to generate key from password: %v", err)
-			}
-			key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), krberr.CName, krberr.CRealm, etype.GetETypeID(), pas)
-			return key, err
-		}
-		key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password(), cl.Credentials.CName(), cl.Credentials.Domain(), etype.GetETypeID(), types.PADataSequence{})
-		return key, err
-	}
-	return types.EncryptionKey{}, errors.New("credential has neither keytab or password to generate key")
-}
-
-// IsConfigured indicates if the client has the values required set.
-func (cl *Client) IsConfigured() (bool, error) {
-	if cl.Credentials.UserName() == "" {
-		return false, errors.New("client does not have a username")
-	}
-	if cl.Credentials.Domain() == "" {
-		return false, errors.New("client does not have a define realm")
-	}
-	// Client needs to have either a password, keytab or a session already (later when loading from CCache)
-	if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() {
-		authTime, _, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
-		if err != nil || authTime.IsZero() {
-			return false, errors.New("client has neither a keytab nor a password set and no session")
-		}
-	}
-	if !cl.Config.LibDefaults.DNSLookupKDC {
-		for _, r := range cl.Config.Realms {
-			if r.Realm == cl.Credentials.Domain() {
-				if len(r.KDC) > 0 {
-					return true, nil
-				}
-				return false, errors.New("client krb5 config does not have any defined KDCs for the default realm")
-			}
-		}
-	}
-	return true, nil
-}
-
-// Login the client with the KDC via an AS exchange.
-func (cl *Client) Login() error {
-	if ok, err := cl.IsConfigured(); !ok {
-		return err
-	}
-	if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() {
-		_, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
-		if err != nil {
-			return krberror.Errorf(err, krberror.KRBMsgError, "no user credentials available and error getting any existing session")
-		}
-		if time.Now().UTC().After(endTime) {
-			return krberror.NewKrberror(krberror.KRBMsgError, "cannot login, no user credentials available and no valid existing session")
-		}
-		// no credentials but there is a session with tgt already
-		return nil
-	}
-	ASReq, err := messages.NewASReqForTGT(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName())
-	if err != nil {
-		return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AS_REQ")
-	}
-	ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0)
-	if err != nil {
-		return err
-	}
-	cl.addSession(ASRep.Ticket, ASRep.DecryptedEncPart)
-	return nil
-}
-
-// realmLogin obtains or renews a TGT and establishes a session for the realm specified.
-func (cl *Client) realmLogin(realm string) error {
-	if realm == cl.Credentials.Domain() {
-		return cl.Login()
-	}
-	_, endTime, _, _, err := cl.sessionTimes(cl.Credentials.Domain())
-	if err != nil || time.Now().UTC().After(endTime) {
-		err := cl.Login()
-		if err != nil {
-			return fmt.Errorf("could not get valid TGT for client's realm: %v", err)
-		}
-	}
-	tgt, skey, err := cl.sessionTGT(cl.Credentials.Domain())
-	if err != nil {
-		return err
-	}
-
-	spn := types.PrincipalName{
-		NameType:   nametype.KRB_NT_SRV_INST,
-		NameString: []string{"krbtgt", realm},
-	}
-
-	_, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, false)
-	if err != nil {
-		return err
-	}
-	cl.addSession(tgsRep.Ticket, tgsRep.DecryptedEncPart)
-
-	return nil
-}
-
-// Destroy stops the auto-renewal of all sessions and removes the sessions and cache entries from the client.
-func (cl *Client) Destroy() {
-	creds := credentials.New("", "")
-	cl.sessions.destroy()
-	cl.cache.clear()
-	cl.Credentials = creds
-	cl.Log("client destroyed")
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go
deleted file mode 100644
index 493fb2f..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/network.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package client
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"time"
-
-	"gopkg.in/jcmturner/gokrb5.v7/iana/errorcode"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-)
-
-// SendToKDC performs network actions to send data to the KDC.
-func (cl *Client) sendToKDC(b []byte, realm string) ([]byte, error) {
-	var rb []byte
-	if cl.Config.LibDefaults.UDPPreferenceLimit == 1 {
-		//1 means we should always use TCP
-		rb, errtcp := cl.sendKDCTCP(realm, b)
-		if errtcp != nil {
-			if e, ok := errtcp.(messages.KRBError); ok {
-				return rb, e
-			}
-			return rb, fmt.Errorf("communication error with KDC via TCP: %v", errtcp)
-		}
-		return rb, nil
-	}
-	if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit {
-		//Try UDP first, TCP second
-		rb, errudp := cl.sendKDCUDP(realm, b)
-		if errudp != nil {
-			if e, ok := errudp.(messages.KRBError); ok && e.ErrorCode != errorcode.KRB_ERR_RESPONSE_TOO_BIG {
-				// Got a KRBError from KDC
-				// If this is not a KRB_ERR_RESPONSE_TOO_BIG we will return immediately otherwise will try TCP.
-				return rb, e
-			}
-			// Try TCP
-			r, errtcp := cl.sendKDCTCP(realm, b)
-			if errtcp != nil {
-				if e, ok := errtcp.(messages.KRBError); ok {
-					// Got a KRBError
-					return r, e
-				}
-				return r, fmt.Errorf("failed to communicate with KDC. Attempts made with UDP (%v) and then TCP (%v)", errudp, errtcp)
-			}
-			rb = r
-		}
-		return rb, nil
-	}
-	//Try TCP first, UDP second
-	rb, errtcp := cl.sendKDCTCP(realm, b)
-	if errtcp != nil {
-		if e, ok := errtcp.(messages.KRBError); ok {
-			// Got a KRBError from KDC so returning and not trying UDP.
-			return rb, e
-		}
-		rb, errudp := cl.sendKDCUDP(realm, b)
-		if errudp != nil {
-			if e, ok := errudp.(messages.KRBError); ok {
-				// Got a KRBError
-				return rb, e
-			}
-			return rb, fmt.Errorf("failed to communicate with KDC. Attempts made with TCP (%v) and then UDP (%v)", errtcp, errudp)
-		}
-	}
-	return rb, nil
-}
-
-// dialKDCTCP establishes a UDP connection to a KDC.
-func dialKDCUDP(count int, kdcs map[int]string) (*net.UDPConn, error) {
-	i := 1
-	for i <= count {
-		udpAddr, err := net.ResolveUDPAddr("udp", kdcs[i])
-		if err != nil {
-			return nil, fmt.Errorf("error resolving KDC address: %v", err)
-		}
-
-		conn, err := net.DialTimeout("udp", udpAddr.String(), 5*time.Second)
-		if err == nil {
-			if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
-				return nil, err
-			}
-			// conn is guaranteed to be a UDPConn
-			return conn.(*net.UDPConn), nil
-		}
-		i++
-	}
-	return nil, errors.New("error in getting a UDP connection to any of the KDCs")
-}
-
-// dialKDCTCP establishes a TCP connection to a KDC.
-func dialKDCTCP(count int, kdcs map[int]string) (*net.TCPConn, error) {
-	i := 1
-	for i <= count {
-		tcpAddr, err := net.ResolveTCPAddr("tcp", kdcs[i])
-		if err != nil {
-			return nil, fmt.Errorf("error resolving KDC address: %v", err)
-		}
-
-		conn, err := net.DialTimeout("tcp", tcpAddr.String(), 5*time.Second)
-		if err == nil {
-			if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
-				return nil, err
-			}
-			// conn is guaranteed to be a TCPConn
-			return conn.(*net.TCPConn), nil
-		}
-		i++
-	}
-	return nil, errors.New("error in getting a TCP connection to any of the KDCs")
-}
-
-// sendKDCUDP sends bytes to the KDC via UDP.
-func (cl *Client) sendKDCUDP(realm string, b []byte) ([]byte, error) {
-	var r []byte
-	count, kdcs, err := cl.Config.GetKDCs(realm, false)
-	if err != nil {
-		return r, err
-	}
-	conn, err := dialKDCUDP(count, kdcs)
-	if err != nil {
-		return r, err
-	}
-	r, err = cl.sendUDP(conn, b)
-	if err != nil {
-		return r, err
-	}
-	return checkForKRBError(r)
-}
-
-// sendKDCTCP sends bytes to the KDC via TCP.
-func (cl *Client) sendKDCTCP(realm string, b []byte) ([]byte, error) {
-	var r []byte
-	count, kdcs, err := cl.Config.GetKDCs(realm, true)
-	if err != nil {
-		return r, err
-	}
-	conn, err := dialKDCTCP(count, kdcs)
-	if err != nil {
-		return r, err
-	}
-	rb, err := cl.sendTCP(conn, b)
-	if err != nil {
-		return r, err
-	}
-	return checkForKRBError(rb)
-}
-
-// sendUDP sends bytes to connection over UDP.
-func (cl *Client) sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) {
-	var r []byte
-	defer conn.Close()
-	_, err := conn.Write(b)
-	if err != nil {
-		return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err)
-	}
-	udpbuf := make([]byte, 4096)
-	n, _, err := conn.ReadFrom(udpbuf)
-	r = udpbuf[:n]
-	if err != nil {
-		return r, fmt.Errorf("sending over UDP failed to %s: %v", conn.RemoteAddr().String(), err)
-	}
-	if len(r) < 1 {
-		return r, fmt.Errorf("no response data from %s", conn.RemoteAddr().String())
-	}
-	return r, nil
-}
-
-// sendTCP sends bytes to connection over TCP.
-func (cl *Client) sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) {
-	defer conn.Close()
-	var r []byte
-	/*
-		RFC https://tools.ietf.org/html/rfc4120#section-7.2.2
-		Each request (KRB_KDC_REQ) and response (KRB_KDC_REP or KRB_ERROR)
-		sent over the TCP stream is preceded by the length of the request as
-		4 octets in network byte order.  The high bit of the length is
-		reserved for future expansion and MUST currently be set to zero.  If
-		a KDC that does not understand how to interpret a set high bit of the
-		length encoding receives a request with the high order bit of the
-		length set, it MUST return a KRB-ERROR message with the error
-		KRB_ERR_FIELD_TOOLONG and MUST close the TCP stream.
-		NB: network byte order == big endian
-	*/
-	var buf bytes.Buffer
-	err := binary.Write(&buf, binary.BigEndian, uint32(len(b)))
-	if err != nil {
-		return r, err
-	}
-	b = append(buf.Bytes(), b...)
-
-	_, err = conn.Write(b)
-	if err != nil {
-		return r, fmt.Errorf("error sending to KDC (%s): %v", conn.RemoteAddr().String(), err)
-	}
-
-	sh := make([]byte, 4, 4)
-	_, err = conn.Read(sh)
-	if err != nil {
-		return r, fmt.Errorf("error reading response size header: %v", err)
-	}
-	s := binary.BigEndian.Uint32(sh)
-
-	rb := make([]byte, s, s)
-	_, err = io.ReadFull(conn, rb)
-	if err != nil {
-		return r, fmt.Errorf("error reading response: %v", err)
-	}
-	if len(rb) < 1 {
-		return r, fmt.Errorf("no response data from KDC %s", conn.RemoteAddr().String())
-	}
-	return rb, nil
-}
-
-// checkForKRBError checks if the response bytes from the KDC are a KRBError.
-func checkForKRBError(b []byte) ([]byte, error) {
-	var KRBErr messages.KRBError
-	if err := KRBErr.Unmarshal(b); err == nil {
-		return b, KRBErr
-	}
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go
deleted file mode 100644
index e6d4180..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/passwd.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package client
-
-import (
-	"fmt"
-	"net"
-
-	"gopkg.in/jcmturner/gokrb5.v7/kadmin"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-)
-
-// Kpasswd server response codes.
-const (
-	KRB5_KPASSWD_SUCCESS             = 0
-	KRB5_KPASSWD_MALFORMED           = 1
-	KRB5_KPASSWD_HARDERROR           = 2
-	KRB5_KPASSWD_AUTHERROR           = 3
-	KRB5_KPASSWD_SOFTERROR           = 4
-	KRB5_KPASSWD_ACCESSDENIED        = 5
-	KRB5_KPASSWD_BAD_VERSION         = 6
-	KRB5_KPASSWD_INITIAL_FLAG_NEEDED = 7
-)
-
-// ChangePasswd changes the password of the client to the value provided.
-func (cl *Client) ChangePasswd(newPasswd string) (bool, error) {
-	ASReq, err := messages.NewASReqForChgPasswd(cl.Credentials.Domain(), cl.Config, cl.Credentials.CName())
-	if err != nil {
-		return false, err
-	}
-	ASRep, err := cl.ASExchange(cl.Credentials.Domain(), ASReq, 0)
-	if err != nil {
-		return false, err
-	}
-
-	msg, key, err := kadmin.ChangePasswdMsg(cl.Credentials.CName(), cl.Credentials.Domain(), newPasswd, ASRep.Ticket, ASRep.DecryptedEncPart.Key)
-	if err != nil {
-		return false, err
-	}
-	r, err := cl.sendToKPasswd(msg)
-	if err != nil {
-		return false, err
-	}
-	err = r.Decrypt(key)
-	if err != nil {
-		return false, err
-	}
-	if r.ResultCode != KRB5_KPASSWD_SUCCESS {
-		return false, fmt.Errorf("error response from kdamin: %s", r.Result)
-	}
-	cl.Credentials.WithPassword(newPasswd)
-	return true, nil
-}
-
-func (cl *Client) sendToKPasswd(msg kadmin.Request) (r kadmin.Reply, err error) {
-	_, kps, err := cl.Config.GetKpasswdServers(cl.Credentials.Domain(), true)
-	if err != nil {
-		return
-	}
-	addr := kps[1]
-	b, err := msg.Marshal()
-	if err != nil {
-		return
-	}
-	if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit {
-		return cl.sendKPasswdUDP(b, addr)
-	}
-	return cl.sendKPasswdTCP(b, addr)
-}
-
-func (cl *Client) sendKPasswdTCP(b []byte, kadmindAddr string) (r kadmin.Reply, err error) {
-	tcpAddr, err := net.ResolveTCPAddr("tcp", kadmindAddr)
-	if err != nil {
-		return
-	}
-	conn, err := net.DialTCP("tcp", nil, tcpAddr)
-	if err != nil {
-		return
-	}
-	rb, err := cl.sendTCP(conn, b)
-	err = r.Unmarshal(rb)
-	return
-}
-
-func (cl *Client) sendKPasswdUDP(b []byte, kadmindAddr string) (r kadmin.Reply, err error) {
-	udpAddr, err := net.ResolveUDPAddr("udp", kadmindAddr)
-	if err != nil {
-		return
-	}
-	conn, err := net.DialUDP("udp", nil, udpAddr)
-	if err != nil {
-		return
-	}
-	rb, err := cl.sendUDP(conn, b)
-	err = r.Unmarshal(rb)
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go
deleted file mode 100644
index ec6c513..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/session.go
+++ /dev/null
@@ -1,255 +0,0 @@
-package client
-
-import (
-	"fmt"
-	"strings"
-	"sync"
-	"time"
-
-	"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// sessions hold TGTs and are keyed on the realm name
-type sessions struct {
-	Entries map[string]*session
-	mux     sync.RWMutex
-}
-
-// destroy erases all sessions
-func (s *sessions) destroy() {
-	s.mux.Lock()
-	defer s.mux.Unlock()
-	for k, e := range s.Entries {
-		e.destroy()
-		delete(s.Entries, k)
-	}
-}
-
-// update replaces a session with the one provided or adds it as a new one
-func (s *sessions) update(sess *session) {
-	s.mux.Lock()
-	defer s.mux.Unlock()
-	// if a session already exists for this, cancel its auto renew.
-	if i, ok := s.Entries[sess.realm]; ok {
-		if i != sess {
-			// Session in the sessions cache is not the same as one provided.
-			// Cancel the one in the cache and add this one.
-			i.mux.Lock()
-			defer i.mux.Unlock()
-			i.cancel <- true
-			s.Entries[sess.realm] = sess
-			return
-		}
-	}
-	// No session for this realm was found so just add it
-	s.Entries[sess.realm] = sess
-}
-
-// get returns the session for the realm specified
-func (s *sessions) get(realm string) (*session, bool) {
-	s.mux.RLock()
-	defer s.mux.RUnlock()
-	sess, ok := s.Entries[realm]
-	return sess, ok
-}
-
-// session holds the TGT details for a realm
-type session struct {
-	realm                string
-	authTime             time.Time
-	endTime              time.Time
-	renewTill            time.Time
-	tgt                  messages.Ticket
-	sessionKey           types.EncryptionKey
-	sessionKeyExpiration time.Time
-	cancel               chan bool
-	mux                  sync.RWMutex
-}
-
-// AddSession adds a session for a realm with a TGT to the client's session cache.
-// A goroutine is started to automatically renew the TGT before expiry.
-func (cl *Client) addSession(tgt messages.Ticket, dep messages.EncKDCRepPart) {
-	if strings.ToLower(tgt.SName.NameString[0]) != "krbtgt" {
-		// Not a TGT
-		return
-	}
-	realm := tgt.SName.NameString[len(tgt.SName.NameString)-1]
-	s := &session{
-		realm:                realm,
-		authTime:             dep.AuthTime,
-		endTime:              dep.EndTime,
-		renewTill:            dep.RenewTill,
-		tgt:                  tgt,
-		sessionKey:           dep.Key,
-		sessionKeyExpiration: dep.KeyExpiration,
-	}
-	cl.sessions.update(s)
-	cl.enableAutoSessionRenewal(s)
-	cl.Log("TGT session added for %s (EndTime: %v)", realm, dep.EndTime)
-}
-
-// update overwrites the session details with those from the TGT and decrypted encPart
-func (s *session) update(tgt messages.Ticket, dep messages.EncKDCRepPart) {
-	s.mux.Lock()
-	defer s.mux.Unlock()
-	s.authTime = dep.AuthTime
-	s.endTime = dep.EndTime
-	s.renewTill = dep.RenewTill
-	s.tgt = tgt
-	s.sessionKey = dep.Key
-	s.sessionKeyExpiration = dep.KeyExpiration
-}
-
-// destroy will cancel any auto renewal of the session and set the expiration times to the current time
-func (s *session) destroy() {
-	s.mux.Lock()
-	defer s.mux.Unlock()
-	if s.cancel != nil {
-		s.cancel <- true
-	}
-	s.endTime = time.Now().UTC()
-	s.renewTill = s.endTime
-	s.sessionKeyExpiration = s.endTime
-}
-
-// valid informs if the TGT is still within the valid time window
-func (s *session) valid() bool {
-	s.mux.RLock()
-	defer s.mux.RUnlock()
-	t := time.Now().UTC()
-	if t.Before(s.endTime) && s.authTime.Before(t) {
-		return true
-	}
-	return false
-}
-
-// tgtDetails is a thread safe way to get the session's realm, TGT and session key values
-func (s *session) tgtDetails() (string, messages.Ticket, types.EncryptionKey) {
-	s.mux.RLock()
-	defer s.mux.RUnlock()
-	return s.realm, s.tgt, s.sessionKey
-}
-
-// timeDetails is a thread safe way to get the session's validity time values
-func (s *session) timeDetails() (string, time.Time, time.Time, time.Time, time.Time) {
-	s.mux.RLock()
-	defer s.mux.RUnlock()
-	return s.realm, s.authTime, s.endTime, s.renewTill, s.sessionKeyExpiration
-}
-
-// enableAutoSessionRenewal turns on the automatic renewal for the client's TGT session.
-func (cl *Client) enableAutoSessionRenewal(s *session) {
-	var timer *time.Timer
-	s.mux.Lock()
-	s.cancel = make(chan bool, 1)
-	s.mux.Unlock()
-	go func(s *session) {
-		for {
-			s.mux.RLock()
-			w := (s.endTime.Sub(time.Now().UTC()) * 5) / 6
-			s.mux.RUnlock()
-			if w < 0 {
-				return
-			}
-			timer = time.NewTimer(w)
-			select {
-			case <-timer.C:
-				renewal, err := cl.refreshSession(s)
-				if err != nil {
-					cl.Log("error refreshing session: %v", err)
-				}
-				if !renewal && err == nil {
-					// end this goroutine as there will have been a new login and new auto renewal goroutine created.
-					return
-				}
-			case <-s.cancel:
-				// cancel has been called. Stop the timer and exit.
-				timer.Stop()
-				return
-			}
-		}
-	}(s)
-}
-
-// renewTGT renews the client's TGT session.
-func (cl *Client) renewTGT(s *session) error {
-	realm, tgt, skey := s.tgtDetails()
-	spn := types.PrincipalName{
-		NameType:   nametype.KRB_NT_SRV_INST,
-		NameString: []string{"krbtgt", realm},
-	}
-	_, tgsRep, err := cl.TGSREQGenerateAndExchange(spn, cl.Credentials.Domain(), tgt, skey, true)
-	if err != nil {
-		return krberror.Errorf(err, krberror.KRBMsgError, "error renewing TGT for %s", realm)
-	}
-	s.update(tgsRep.Ticket, tgsRep.DecryptedEncPart)
-	cl.sessions.update(s)
-	cl.Log("TGT session renewed for %s (EndTime: %v)", realm, tgsRep.DecryptedEncPart.EndTime)
-	return nil
-}
-
-// refreshSession updates either through renewal or creating a new login.
-// The boolean indicates if the update was a renewal.
-func (cl *Client) refreshSession(s *session) (bool, error) {
-	s.mux.RLock()
-	realm := s.realm
-	renewTill := s.renewTill
-	s.mux.RUnlock()
-	cl.Log("refreshing TGT session for %s", realm)
-	if time.Now().UTC().Before(renewTill) {
-		err := cl.renewTGT(s)
-		return true, err
-	}
-	err := cl.realmLogin(realm)
-	return false, err
-}
-
-// ensureValidSession makes sure there is a valid session for the realm
-func (cl *Client) ensureValidSession(realm string) error {
-	s, ok := cl.sessions.get(realm)
-	if ok {
-		s.mux.RLock()
-		d := s.endTime.Sub(s.authTime) / 6
-		if s.endTime.Sub(time.Now().UTC()) > d {
-			s.mux.RUnlock()
-			return nil
-		}
-		s.mux.RUnlock()
-		_, err := cl.refreshSession(s)
-		return err
-	}
-	return cl.realmLogin(realm)
-}
-
-// sessionTGTDetails is a thread safe way to get the TGT and session key values for a realm
-func (cl *Client) sessionTGT(realm string) (tgt messages.Ticket, sessionKey types.EncryptionKey, err error) {
-	err = cl.ensureValidSession(realm)
-	if err != nil {
-		return
-	}
-	s, ok := cl.sessions.get(realm)
-	if !ok {
-		err = fmt.Errorf("could not find TGT session for %s", realm)
-		return
-	}
-	_, tgt, sessionKey = s.tgtDetails()
-	return
-}
-
-func (cl *Client) sessionTimes(realm string) (authTime, endTime, renewTime, sessionExp time.Time, err error) {
-	s, ok := cl.sessions.get(realm)
-	if !ok {
-		err = fmt.Errorf("could not find TGT session for %s", realm)
-		return
-	}
-	_, authTime, endTime, renewTime, sessionExp = s.timeDetails()
-	return
-}
-
-// spnRealm resolves the realm name of a service principal name
-func (cl *Client) spnRealm(spn types.PrincipalName) string {
-	return cl.Config.ResolveRealm(spn.NameString[len(spn.NameString)-1])
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go
deleted file mode 100644
index 516c823..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/client/settings.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package client
-
-import "log"
-
-// Settings holds optional client settings.
-type Settings struct {
-	disablePAFXFast         bool
-	assumePreAuthentication bool
-	preAuthEType            int32
-	logger                  *log.Logger
-}
-
-// NewSettings creates a new client settings struct.
-func NewSettings(settings ...func(*Settings)) *Settings {
-	s := new(Settings)
-	for _, set := range settings {
-		set(s)
-	}
-	return s
-}
-
-// DisablePAFXFAST used to configure the client to not use PA_FX_FAST.
-//
-// s := NewSettings(DisablePAFXFAST(true))
-func DisablePAFXFAST(b bool) func(*Settings) {
-	return func(s *Settings) {
-		s.disablePAFXFast = b
-	}
-}
-
-// DisablePAFXFAST indicates is the client should disable the use of PA_FX_FAST.
-func (s *Settings) DisablePAFXFAST() bool {
-	return s.disablePAFXFast
-}
-
-// AssumePreAuthentication used to configure the client to assume pre-authentication is required.
-//
-// s := NewSettings(AssumePreAuthentication(true))
-func AssumePreAuthentication(b bool) func(*Settings) {
-	return func(s *Settings) {
-		s.disablePAFXFast = b
-	}
-}
-
-// AssumePreAuthentication indicates if the client should proactively assume using pre-authentication.
-func (s *Settings) AssumePreAuthentication() bool {
-	return s.assumePreAuthentication
-}
-
-// Logger used to configure client with a logger.
-//
-// s := NewSettings(kt, Logger(l))
-func Logger(l *log.Logger) func(*Settings) {
-	return func(s *Settings) {
-		s.logger = l
-	}
-}
-
-// Logger returns the client logger instance.
-func (s *Settings) Logger() *log.Logger {
-	return s.logger
-}
-
-// Log will write to the service's logger if it is configured.
-func (cl *Client) Log(format string, v ...interface{}) {
-	if cl.settings.Logger() != nil {
-		cl.settings.Logger().Printf(format, v...)
-	}
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go
deleted file mode 100644
index 1fbda51..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/error.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package config
-
-import "fmt"
-
-// UnsupportedDirective error.
-type UnsupportedDirective struct {
-	text string
-}
-
-// Error implements the error interface for unsupported directives.
-func (e UnsupportedDirective) Error() string {
-	return e.text
-}
-
-// Invalid config error.
-type Invalid struct {
-	text string
-}
-
-// Error implements the error interface for invalid config error.
-func (e Invalid) Error() string {
-	return e.text
-}
-
-// InvalidErrorf creates a new Invalid error.
-func InvalidErrorf(format string, a ...interface{}) Invalid {
-	return Invalid{
-		text: fmt.Sprintf("invalid krb5 config "+format, a...),
-	}
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go
deleted file mode 100644
index a58c234..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/hosts.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package config
-
-import (
-	"fmt"
-	"math/rand"
-	"net"
-	"strconv"
-	"strings"
-
-	"gopkg.in/jcmturner/dnsutils.v1"
-)
-
-// GetKDCs returns the count of KDCs available and a map of KDC host names keyed on preference order.
-func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) {
-	if realm == "" {
-		realm = c.LibDefaults.DefaultRealm
-	}
-	kdcs := make(map[int]string)
-	var count int
-
-	// Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf.
-	if c.LibDefaults.DNSLookupKDC {
-		proto := "udp"
-		if tcp {
-			proto = "tcp"
-		}
-		c, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm)
-		if err != nil {
-			return count, kdcs, err
-		}
-		if len(addrs) < 1 {
-			return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm)
-		}
-		count = c
-		for k, v := range addrs {
-			kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port))
-		}
-	} else {
-		// Get the KDCs from the krb5.conf an order them randomly for preference.
-		var ks []string
-		for _, r := range c.Realms {
-			if r.Realm == realm {
-				ks = r.KDC
-				break
-			}
-		}
-		count = len(ks)
-		if count < 1 {
-			return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm)
-		}
-		kdcs = randServOrder(ks)
-	}
-	return count, kdcs, nil
-}
-
-// GetKpasswdServers returns the count of kpasswd servers available and a map of kpasswd host names keyed on preference order.
-// https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html#realms - see kpasswd_server section
-func (c *Config) GetKpasswdServers(realm string, tcp bool) (int, map[int]string, error) {
-	kdcs := make(map[int]string)
-	var count int
-
-	// Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf.
-	if c.LibDefaults.DNSLookupKDC {
-		proto := "udp"
-		if tcp {
-			proto = "tcp"
-		}
-		c, addrs, err := dnsutils.OrderedSRV("kpasswd", proto, realm)
-		if err != nil {
-			return count, kdcs, err
-		}
-		if c < 1 {
-			c, addrs, err = dnsutils.OrderedSRV("kerberos-adm", proto, realm)
-			if err != nil {
-				return count, kdcs, err
-			}
-		}
-		if len(addrs) < 1 {
-			return count, kdcs, fmt.Errorf("no kpasswd or kadmin SRV records found for realm %s", realm)
-		}
-		count = c
-		for k, v := range addrs {
-			kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port))
-		}
-	} else {
-		// Get the KDCs from the krb5.conf an order them randomly for preference.
-		var ks []string
-		var ka []string
-		for _, r := range c.Realms {
-			if r.Realm == realm {
-				ks = r.KPasswdServer
-				ka = r.AdminServer
-				break
-			}
-		}
-		if len(ks) < 1 {
-			for _, k := range ka {
-				h, _, err := net.SplitHostPort(k)
-				if err != nil {
-					continue
-				}
-				ks = append(ks, h+":464")
-			}
-		}
-		count = len(ks)
-		if count < 1 {
-			return count, kdcs, fmt.Errorf("no kpasswd or kadmin defined in configuration for realm %s", realm)
-		}
-		kdcs = randServOrder(ks)
-	}
-	return count, kdcs, nil
-}
-
-func randServOrder(ks []string) map[int]string {
-	kdcs := make(map[int]string)
-	count := len(ks)
-	i := 1
-	if count > 1 {
-		l := len(ks)
-		for l > 0 {
-			ri := rand.Intn(l)
-			kdcs[i] = ks[ri]
-			if l > 1 {
-				// Remove the entry from the source slice by swapping with the last entry and truncating
-				ks[len(ks)-1], ks[ri] = ks[ri], ks[len(ks)-1]
-				ks = ks[:len(ks)-1]
-				l = len(ks)
-			} else {
-				l = 0
-			}
-			i++
-		}
-	} else {
-		kdcs[i] = ks[0]
-	}
-	return kdcs
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go
deleted file mode 100644
index 8efe92d..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/config/krb5conf.go
+++ /dev/null
@@ -1,726 +0,0 @@
-// Package config implements KRB5 client and service configuration as described at https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html
-package config
-
-import (
-	"bufio"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"os"
-	"os/user"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-// Config represents the KRB5 configuration.
-type Config struct {
-	LibDefaults LibDefaults
-	Realms      []Realm
-	DomainRealm DomainRealm
-	//CaPaths
-	//AppDefaults
-	//Plugins
-}
-
-// WeakETypeList is a list of encryption types that have been deemed weak.
-const WeakETypeList = "des-cbc-crc des-cbc-md4 des-cbc-md5 des-cbc-raw des3-cbc-raw des-hmac-sha1 arcfour-hmac-exp rc4-hmac-exp arcfour-hmac-md5-exp des"
-
-// NewConfig creates a new config struct instance.
-func NewConfig() *Config {
-	d := make(DomainRealm)
-	return &Config{
-		LibDefaults: newLibDefaults(),
-		DomainRealm: d,
-	}
-}
-
-// LibDefaults represents the [libdefaults] section of the configuration.
-type LibDefaults struct {
-	AllowWeakCrypto bool //default false
-	// ap_req_checksum_type int //unlikely to support this
-	Canonicalize bool          //default false
-	CCacheType   int           //default is 4. unlikely to implement older
-	Clockskew    time.Duration //max allowed skew in seconds, default 300
-	//Default_ccache_name string // default /tmp/krb5cc_%{uid} //Not implementing as will hold in memory
-	DefaultClientKeytabName string //default /usr/local/var/krb5/user/%{euid}/client.keytab
-	DefaultKeytabName       string //default /etc/krb5.keytab
-	DefaultRealm            string
-	DefaultTGSEnctypes      []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
-	DefaultTktEnctypes      []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
-	DefaultTGSEnctypeIDs    []int32  //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
-	DefaultTktEnctypeIDs    []int32  //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
-	DNSCanonicalizeHostname bool     //default true
-	DNSLookupKDC            bool     //default false
-	DNSLookupRealm          bool
-	ExtraAddresses          []net.IP       //Not implementing yet
-	Forwardable             bool           //default false
-	IgnoreAcceptorHostname  bool           //default false
-	K5LoginAuthoritative    bool           //default false
-	K5LoginDirectory        string         //default user's home directory. Must be owned by the user or root
-	KDCDefaultOptions       asn1.BitString //default 0x00000010 (KDC_OPT_RENEWABLE_OK)
-	KDCTimeSync             int            //default 1
-	//kdc_req_checksum_type int //unlikely to implement as for very old KDCs
-	NoAddresses         bool     //default true
-	PermittedEnctypes   []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4
-	PermittedEnctypeIDs []int32
-	//plugin_base_dir string //not supporting plugins
-	PreferredPreauthTypes []int         //default “17, 16, 15, 14”, which forces libkrb5 to attempt to use PKINIT if it is supported
-	Proxiable             bool          //default false
-	RDNS                  bool          //default true
-	RealmTryDomains       int           //default -1
-	RenewLifetime         time.Duration //default 0
-	SafeChecksumType      int           //default 8
-	TicketLifetime        time.Duration //default 1 day
-	UDPPreferenceLimit    int           // 1 means to always use tcp. MIT krb5 has a default value of 1465, and it prevents user setting more than 32700.
-	VerifyAPReqNofail     bool          //default false
-}
-
-// Create a new LibDefaults struct.
-func newLibDefaults() LibDefaults {
-	uid := "0"
-	var hdir string
-	usr, _ := user.Current()
-	if usr != nil {
-		uid = usr.Uid
-		hdir = usr.HomeDir
-	}
-	opts := asn1.BitString{}
-	opts.Bytes, _ = hex.DecodeString("00000010")
-	opts.BitLength = len(opts.Bytes) * 8
-	return LibDefaults{
-		CCacheType:              4,
-		Clockskew:               time.Duration(300) * time.Second,
-		DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid),
-		DefaultKeytabName:       "/etc/krb5.keytab",
-		DefaultTGSEnctypes:      []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"},
-		DefaultTktEnctypes:      []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"},
-		DNSCanonicalizeHostname: true,
-		K5LoginDirectory:        hdir,
-		KDCDefaultOptions:       opts,
-		KDCTimeSync:             1,
-		NoAddresses:             true,
-		PermittedEnctypes:       []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"},
-		RDNS:                    true,
-		RealmTryDomains:         -1,
-		SafeChecksumType:        8,
-		TicketLifetime:          time.Duration(24) * time.Hour,
-		UDPPreferenceLimit:      1465,
-		PreferredPreauthTypes:   []int{17, 16, 15, 14},
-	}
-}
-
-// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct.
-func (l *LibDefaults) parseLines(lines []string) error {
-	for _, line := range lines {
-		//Remove comments after the values
-		if idx := strings.IndexAny(line, "#;"); idx != -1 {
-			line = line[:idx]
-		}
-		line = strings.TrimSpace(line)
-		if line == "" {
-			continue
-		}
-		if !strings.Contains(line, "=") {
-			return InvalidErrorf("libdefaults section line (%s)", line)
-		}
-
-		p := strings.Split(line, "=")
-		key := strings.TrimSpace(strings.ToLower(p[0]))
-		switch key {
-		case "allow_weak_crypto":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.AllowWeakCrypto = v
-		case "canonicalize":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.Canonicalize = v
-		case "ccache_type":
-			p[1] = strings.TrimSpace(p[1])
-			v, err := strconv.ParseUint(p[1], 10, 32)
-			if err != nil || v < 0 || v > 4 {
-				return InvalidErrorf("libdefaults section line (%s)", line)
-			}
-			l.CCacheType = int(v)
-		case "clockskew":
-			d, err := parseDuration(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.Clockskew = d
-		case "default_client_keytab_name":
-			l.DefaultClientKeytabName = strings.TrimSpace(p[1])
-		case "default_keytab_name":
-			l.DefaultKeytabName = strings.TrimSpace(p[1])
-		case "default_realm":
-			l.DefaultRealm = strings.TrimSpace(p[1])
-		case "default_tgs_enctypes":
-			l.DefaultTGSEnctypes = strings.Fields(p[1])
-		case "default_tkt_enctypes":
-			l.DefaultTktEnctypes = strings.Fields(p[1])
-		case "dns_canonicalize_hostname":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.DNSCanonicalizeHostname = v
-		case "dns_lookup_kdc":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.DNSLookupKDC = v
-		case "dns_lookup_realm":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.DNSLookupRealm = v
-		case "extra_addresses":
-			ipStr := strings.TrimSpace(p[1])
-			for _, ip := range strings.Split(ipStr, ",") {
-				if eip := net.ParseIP(ip); eip != nil {
-					l.ExtraAddresses = append(l.ExtraAddresses, eip)
-				}
-			}
-		case "forwardable":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.Forwardable = v
-		case "ignore_acceptor_hostname":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.IgnoreAcceptorHostname = v
-		case "k5login_authoritative":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.K5LoginAuthoritative = v
-		case "k5login_directory":
-			l.K5LoginDirectory = strings.TrimSpace(p[1])
-		case "kdc_default_options":
-			v := strings.TrimSpace(p[1])
-			v = strings.Replace(v, "0x", "", -1)
-			b, err := hex.DecodeString(v)
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.KDCDefaultOptions.Bytes = b
-			l.KDCDefaultOptions.BitLength = len(b) * 8
-		case "kdc_timesync":
-			p[1] = strings.TrimSpace(p[1])
-			v, err := strconv.ParseInt(p[1], 10, 32)
-			if err != nil || v < 0 {
-				return InvalidErrorf("libdefaults section line (%s)", line)
-			}
-			l.KDCTimeSync = int(v)
-		case "noaddresses":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.NoAddresses = v
-		case "permitted_enctypes":
-			l.PermittedEnctypes = strings.Fields(p[1])
-		case "preferred_preauth_types":
-			p[1] = strings.TrimSpace(p[1])
-			t := strings.Split(p[1], ",")
-			var v []int
-			for _, s := range t {
-				i, err := strconv.ParseInt(s, 10, 32)
-				if err != nil {
-					return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-				}
-				v = append(v, int(i))
-			}
-			l.PreferredPreauthTypes = v
-		case "proxiable":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.Proxiable = v
-		case "rdns":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.RDNS = v
-		case "realm_try_domains":
-			p[1] = strings.TrimSpace(p[1])
-			v, err := strconv.ParseInt(p[1], 10, 32)
-			if err != nil || v < -1 {
-				return InvalidErrorf("libdefaults section line (%s)", line)
-			}
-			l.RealmTryDomains = int(v)
-		case "renew_lifetime":
-			d, err := parseDuration(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.RenewLifetime = d
-		case "safe_checksum_type":
-			p[1] = strings.TrimSpace(p[1])
-			v, err := strconv.ParseInt(p[1], 10, 32)
-			if err != nil || v < 0 {
-				return InvalidErrorf("libdefaults section line (%s)", line)
-			}
-			l.SafeChecksumType = int(v)
-		case "ticket_lifetime":
-			d, err := parseDuration(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.TicketLifetime = d
-		case "udp_preference_limit":
-			p[1] = strings.TrimSpace(p[1])
-			v, err := strconv.ParseUint(p[1], 10, 32)
-			if err != nil || v > 32700 {
-				return InvalidErrorf("libdefaults section line (%s)", line)
-			}
-			l.UDPPreferenceLimit = int(v)
-		case "verify_ap_req_nofail":
-			v, err := parseBoolean(p[1])
-			if err != nil {
-				return InvalidErrorf("libdefaults section line (%s): %v", line, err)
-			}
-			l.VerifyAPReqNofail = v
-		default:
-			//Ignore the line
-			continue
-		}
-	}
-	l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto)
-	l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto)
-	l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto)
-	return nil
-}
-
-// Realm represents an entry in the [realms] section of the configuration.
-type Realm struct {
-	Realm       string
-	AdminServer []string
-	//auth_to_local //Not implementing for now
-	//auth_to_local_names //Not implementing for now
-	DefaultDomain string
-	KDC           []string
-	KPasswdServer []string //default admin_server:464
-	MasterKDC     []string
-}
-
-// Parse the lines of a [realms] entry into the Realm struct.
-func (r *Realm) parseLines(name string, lines []string) (err error) {
-	r.Realm = name
-	var adminServerFinal bool
-	var KDCFinal bool
-	var kpasswdServerFinal bool
-	var masterKDCFinal bool
-	var ignore bool
-	var c int // counts the depth of blocks within brackets { }
-	for _, line := range lines {
-		if ignore && c > 0 && !strings.Contains(line, "{") && !strings.Contains(line, "}") {
-			continue
-		}
-		//Remove comments after the values
-		if idx := strings.IndexAny(line, "#;"); idx != -1 {
-			line = line[:idx]
-		}
-		line = strings.TrimSpace(line)
-		if line == "" {
-			continue
-		}
-		if !strings.Contains(line, "=") && !strings.Contains(line, "}") {
-			return InvalidErrorf("realms section line (%s)", line)
-		}
-		if strings.Contains(line, "v4_") {
-			ignore = true
-			err = UnsupportedDirective{"v4 configurations are not supported"}
-		}
-		if strings.Contains(line, "{") {
-			c++
-			if ignore {
-				continue
-			}
-		}
-		if strings.Contains(line, "}") {
-			c--
-			if c < 0 {
-				return InvalidErrorf("unpaired curly brackets")
-			}
-			if ignore {
-				if c < 1 {
-					c = 0
-					ignore = false
-				}
-				continue
-			}
-		}
-
-		p := strings.Split(line, "=")
-		key := strings.TrimSpace(strings.ToLower(p[0]))
-		v := strings.TrimSpace(p[1])
-		switch key {
-		case "admin_server":
-			appendUntilFinal(&r.AdminServer, v, &adminServerFinal)
-		case "default_domain":
-			r.DefaultDomain = v
-		case "kdc":
-			if !strings.Contains(v, ":") {
-				// No port number specified default to 88
-				if strings.HasSuffix(v, `*`) {
-					v = strings.TrimSpace(strings.TrimSuffix(v, `*`)) + ":88*"
-				} else {
-					v = strings.TrimSpace(v) + ":88"
-				}
-			}
-			appendUntilFinal(&r.KDC, v, &KDCFinal)
-		case "kpasswd_server":
-			appendUntilFinal(&r.KPasswdServer, v, &kpasswdServerFinal)
-		case "master_kdc":
-			appendUntilFinal(&r.MasterKDC, v, &masterKDCFinal)
-		default:
-			//Ignore the line
-			continue
-		}
-	}
-	//default for Kpasswd_server = admin_server:464
-	if len(r.KPasswdServer) < 1 {
-		for _, a := range r.AdminServer {
-			s := strings.Split(a, ":")
-			r.KPasswdServer = append(r.KPasswdServer, s[0]+":464")
-		}
-	}
-	return
-}
-
-// Parse the lines of the [realms] section of the configuration into an slice of Realm structs.
-func parseRealms(lines []string) (realms []Realm, err error) {
-	var name string
-	var start int
-	var c int
-	for i, l := range lines {
-		//Remove comments after the values
-		if idx := strings.IndexAny(l, "#;"); idx != -1 {
-			l = l[:idx]
-		}
-		l = strings.TrimSpace(l)
-		if l == "" {
-			continue
-		}
-		//if strings.Contains(l, "v4_") {
-		//	return nil, errors.New("v4 configurations are not supported in Realms section")
-		//}
-		if strings.Contains(l, "{") {
-			c++
-			if !strings.Contains(l, "=") {
-				return nil, fmt.Errorf("realm configuration line invalid: %s", l)
-			}
-			if c == 1 {
-				start = i
-				p := strings.Split(l, "=")
-				name = strings.TrimSpace(p[0])
-			}
-		}
-		if strings.Contains(l, "}") {
-			if c < 1 {
-				// but not started a block!!!
-				return nil, errors.New("invalid Realms section in configuration")
-			}
-			c--
-			if c == 0 {
-				var r Realm
-				e := r.parseLines(name, lines[start+1:i])
-				if e != nil {
-					if _, ok := e.(UnsupportedDirective); !ok {
-						err = e
-						return
-					}
-					err = e
-				}
-				realms = append(realms, r)
-			}
-		}
-	}
-	return
-}
-
-// DomainRealm maps the domains to realms representing the [domain_realm] section of the configuration.
-type DomainRealm map[string]string
-
-// Parse the lines of the [domain_realm] section of the configuration and add to the mapping.
-func (d *DomainRealm) parseLines(lines []string) error {
-	for _, line := range lines {
-		//Remove comments after the values
-		if idx := strings.IndexAny(line, "#;"); idx != -1 {
-			line = line[:idx]
-		}
-		if strings.TrimSpace(line) == "" {
-			continue
-		}
-		if !strings.Contains(line, "=") {
-			return InvalidErrorf("realm line (%s)", line)
-		}
-		p := strings.Split(line, "=")
-		domain := strings.TrimSpace(strings.ToLower(p[0]))
-		realm := strings.TrimSpace(p[1])
-		d.addMapping(domain, realm)
-	}
-	return nil
-}
-
-// Add a domain to realm mapping.
-func (d *DomainRealm) addMapping(domain, realm string) {
-	(*d)[domain] = realm
-}
-
-// Delete a domain to realm mapping.
-func (d *DomainRealm) deleteMapping(domain, realm string) {
-	delete(*d, domain)
-}
-
-// ResolveRealm resolves the kerberos realm for the specified domain name from the domain to realm mapping.
-// The most specific mapping is returned.
-func (c *Config) ResolveRealm(domainName string) string {
-	domainName = strings.TrimSuffix(domainName, ".")
-
-	// Try to match the entire hostname first
-	if r, ok := c.DomainRealm[domainName]; ok {
-		return r
-	}
-
-	// Try to match all DNS domain parts
-	periods := strings.Count(domainName, ".") + 1
-	for i := 2; i <= periods; i++ {
-		z := strings.SplitN(domainName, ".", i)
-		if r, ok := c.DomainRealm["."+z[len(z)-1]]; ok {
-			return r
-		}
-	}
-	return c.LibDefaults.DefaultRealm
-}
-
-// Load the KRB5 configuration from the specified file path.
-func Load(cfgPath string) (*Config, error) {
-	fh, err := os.Open(cfgPath)
-	if err != nil {
-		return nil, errors.New("configuration file could not be opened: " + cfgPath + " " + err.Error())
-	}
-	defer fh.Close()
-	scanner := bufio.NewScanner(fh)
-	return NewConfigFromScanner(scanner)
-}
-
-// NewConfigFromString creates a new Config struct from a string.
-func NewConfigFromString(s string) (*Config, error) {
-	reader := strings.NewReader(s)
-	return NewConfigFromReader(reader)
-}
-
-// NewConfigFromReader creates a new Config struct from an io.Reader.
-func NewConfigFromReader(r io.Reader) (*Config, error) {
-	scanner := bufio.NewScanner(r)
-	return NewConfigFromScanner(scanner)
-}
-
-// NewConfigFromScanner creates a new Config struct from a bufio.Scanner.
-func NewConfigFromScanner(scanner *bufio.Scanner) (*Config, error) {
-	c := NewConfig()
-	var e error
-	sections := make(map[int]string)
-	var sectionLineNum []int
-	var lines []string
-	for scanner.Scan() {
-		// Skip comments and blank lines
-		if matched, _ := regexp.MatchString(`^\s*(#|;|\n)`, scanner.Text()); matched {
-			continue
-		}
-		if matched, _ := regexp.MatchString(`^\s*\[libdefaults\]\s*`, scanner.Text()); matched {
-			sections[len(lines)] = "libdefaults"
-			sectionLineNum = append(sectionLineNum, len(lines))
-			continue
-		}
-		if matched, _ := regexp.MatchString(`^\s*\[realms\]\s*`, scanner.Text()); matched {
-			sections[len(lines)] = "realms"
-			sectionLineNum = append(sectionLineNum, len(lines))
-			continue
-		}
-		if matched, _ := regexp.MatchString(`^\s*\[domain_realm\]\s*`, scanner.Text()); matched {
-			sections[len(lines)] = "domain_realm"
-			sectionLineNum = append(sectionLineNum, len(lines))
-			continue
-		}
-		if matched, _ := regexp.MatchString(`^\s*\[.*\]\s*`, scanner.Text()); matched {
-			sections[len(lines)] = "unknown_section"
-			sectionLineNum = append(sectionLineNum, len(lines))
-			continue
-		}
-		lines = append(lines, scanner.Text())
-	}
-	for i, start := range sectionLineNum {
-		var end int
-		if i+1 >= len(sectionLineNum) {
-			end = len(lines)
-		} else {
-			end = sectionLineNum[i+1]
-		}
-		switch section := sections[start]; section {
-		case "libdefaults":
-			err := c.LibDefaults.parseLines(lines[start:end])
-			if err != nil {
-				if _, ok := err.(UnsupportedDirective); !ok {
-					return nil, fmt.Errorf("error processing libdefaults section: %v", err)
-				}
-				e = err
-			}
-		case "realms":
-			realms, err := parseRealms(lines[start:end])
-			if err != nil {
-				if _, ok := err.(UnsupportedDirective); !ok {
-					return nil, fmt.Errorf("error processing realms section: %v", err)
-				}
-				e = err
-			}
-			c.Realms = realms
-		case "domain_realm":
-			err := c.DomainRealm.parseLines(lines[start:end])
-			if err != nil {
-				if _, ok := err.(UnsupportedDirective); !ok {
-					return nil, fmt.Errorf("error processing domaain_realm section: %v", err)
-				}
-				e = err
-			}
-		default:
-			continue
-		}
-	}
-	return c, e
-}
-
-// Parse a space delimited list of ETypes into a list of EType numbers optionally filtering out weak ETypes.
-func parseETypes(s []string, w bool) []int32 {
-	var eti []int32
-	for _, et := range s {
-		if !w {
-			var weak bool
-			for _, wet := range strings.Fields(WeakETypeList) {
-				if et == wet {
-					weak = true
-					break
-				}
-			}
-			if weak {
-				continue
-			}
-		}
-		i := etypeID.EtypeSupported(et)
-		if i != 0 {
-			eti = append(eti, i)
-		}
-	}
-	return eti
-}
-
-// Parse a time duration string in the configuration to a golang time.Duration.
-func parseDuration(s string) (time.Duration, error) {
-	s = strings.Replace(strings.TrimSpace(s), " ", "", -1)
-
-	// handle Nd[NmNs]
-	if strings.Contains(s, "d") {
-		ds := strings.SplitN(s, "d", 2)
-		dn, err := strconv.ParseUint(ds[0], 10, 32)
-		if err != nil {
-			return time.Duration(0), errors.New("invalid time duration")
-		}
-		d := time.Duration(dn*24) * time.Hour
-		if ds[1] != "" {
-			dp, err := time.ParseDuration(ds[1])
-			if err != nil {
-				return time.Duration(0), errors.New("invalid time duration")
-			}
-			d = d + dp
-		}
-		return d, nil
-	}
-
-	// handle Nm[Ns]
-	d, err := time.ParseDuration(s)
-	if err == nil {
-		return d, nil
-	}
-
-	// handle N
-	v, err := strconv.ParseUint(s, 10, 32)
-	if err == nil && v > 0 {
-		return time.Duration(v) * time.Second, nil
-	}
-
-	// handle h:m[:s]
-	if strings.Contains(s, ":") {
-		t := strings.Split(s, ":")
-		if 2 > len(t) || len(t) > 3 {
-			return time.Duration(0), errors.New("invalid time duration value")
-		}
-		var i []int
-		for _, n := range t {
-			j, err := strconv.ParseInt(n, 10, 16)
-			if err != nil {
-				return time.Duration(0), errors.New("invalid time duration value")
-			}
-			i = append(i, int(j))
-		}
-		d := time.Duration(i[0])*time.Hour + time.Duration(i[1])*time.Minute
-		if len(i) == 3 {
-			d = d + time.Duration(i[2])*time.Second
-		}
-		return d, nil
-	}
-	return time.Duration(0), errors.New("invalid time duration value")
-}
-
-// Parse possible boolean values to golang bool.
-func parseBoolean(s string) (bool, error) {
-	s = strings.TrimSpace(s)
-	v, err := strconv.ParseBool(s)
-	if err == nil {
-		return v, nil
-	}
-	switch strings.ToLower(s) {
-	case "yes":
-		return true, nil
-	case "y":
-		return true, nil
-	case "no":
-		return false, nil
-	case "n":
-		return false, nil
-	}
-	return false, errors.New("invalid boolean value")
-}
-
-// Parse array of strings but stop if an asterisk is placed at the end of a line.
-func appendUntilFinal(s *[]string, value string, final *bool) {
-	if *final {
-		return
-	}
-	if last := len(value) - 1; last >= 0 && value[last] == '*' {
-		*final = true
-		value = value[:len(value)-1]
-	}
-	*s = append(*s, value)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go
deleted file mode 100644
index 98ec29b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/ccache.go
+++ /dev/null
@@ -1,348 +0,0 @@
-package credentials
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"io/ioutil"
-	"strings"
-	"time"
-	"unsafe"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-const (
-	headerFieldTagKDCOffset = 1
-)
-
-// The first byte of the file always has the value 5.
-// The value of the second byte contains the version number (1 through 4)
-// Versions 1 and 2 of the file format use native byte order for integer representations.
-// Versions 3 and 4 always use big-endian byte order
-// After the two-byte version indicator, the file has three parts:
-//   1) the header (in version 4 only)
-//   2) the default principal name
-//   3) a sequence of credentials
-
-// CCache is the file credentials cache as define here: https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html
-type CCache struct {
-	Version          uint8
-	Header           header
-	DefaultPrincipal principal
-	Credentials      []*Credential
-	Path             string
-}
-
-type header struct {
-	length uint16
-	fields []headerField
-}
-
-type headerField struct {
-	tag    uint16
-	length uint16
-	value  []byte
-}
-
-// Credential cache entry principal struct.
-type principal struct {
-	Realm         string
-	PrincipalName types.PrincipalName
-}
-
-// Credential holds a Kerberos client's ccache credential information.
-type Credential struct {
-	Client       principal
-	Server       principal
-	Key          types.EncryptionKey
-	AuthTime     time.Time
-	StartTime    time.Time
-	EndTime      time.Time
-	RenewTill    time.Time
-	IsSKey       bool
-	TicketFlags  asn1.BitString
-	Addresses    []types.HostAddress
-	AuthData     []types.AuthorizationDataEntry
-	Ticket       []byte
-	SecondTicket []byte
-}
-
-// LoadCCache loads a credential cache file into a CCache type.
-func LoadCCache(cpath string) (*CCache, error) {
-	c := new(CCache)
-	b, err := ioutil.ReadFile(cpath)
-	if err != nil {
-		return c, err
-	}
-	err = c.Unmarshal(b)
-	return c, err
-}
-
-// Unmarshal a byte slice of credential cache data into CCache type.
-func (c *CCache) Unmarshal(b []byte) error {
-	p := 0
-	//The first byte of the file always has the value 5
-	if int8(b[p]) != 5 {
-		return errors.New("Invalid credential cache data. First byte does not equal 5")
-	}
-	p++
-	//Get credential cache version
-	//The second byte contains the version number (1 to 4)
-	c.Version = b[p]
-	if c.Version < 1 || c.Version > 4 {
-		return errors.New("Invalid credential cache data. Keytab version is not within 1 to 4")
-	}
-	p++
-	//Version 1 or 2 of the file format uses native byte order for integer representations. Versions 3 & 4 always uses big-endian byte order
-	var endian binary.ByteOrder
-	endian = binary.BigEndian
-	if (c.Version == 1 || c.Version == 2) && isNativeEndianLittle() {
-		endian = binary.LittleEndian
-	}
-	if c.Version == 4 {
-		err := parseHeader(b, &p, c, &endian)
-		if err != nil {
-			return err
-		}
-	}
-	c.DefaultPrincipal = parsePrincipal(b, &p, c, &endian)
-	for p < len(b) {
-		cred, err := parseCredential(b, &p, c, &endian)
-		if err != nil {
-			return err
-		}
-		c.Credentials = append(c.Credentials, cred)
-	}
-	return nil
-}
-
-func parseHeader(b []byte, p *int, c *CCache, e *binary.ByteOrder) error {
-	if c.Version != 4 {
-		return errors.New("Credentials cache version is not 4 so there is no header to parse.")
-	}
-	h := header{}
-	h.length = uint16(readInt16(b, p, e))
-	for *p <= int(h.length) {
-		f := headerField{}
-		f.tag = uint16(readInt16(b, p, e))
-		f.length = uint16(readInt16(b, p, e))
-		f.value = b[*p : *p+int(f.length)]
-		*p += int(f.length)
-		if !f.valid() {
-			return errors.New("Invalid credential cache header found")
-		}
-		h.fields = append(h.fields, f)
-	}
-	c.Header = h
-	return nil
-}
-
-// Parse the Keytab bytes of a principal into a Keytab entry's principal.
-func parsePrincipal(b []byte, p *int, c *CCache, e *binary.ByteOrder) (princ principal) {
-	if c.Version != 1 {
-		//Name Type is omitted in version 1
-		princ.PrincipalName.NameType = readInt32(b, p, e)
-	}
-	nc := int(readInt32(b, p, e))
-	if c.Version == 1 {
-		//In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2
-		nc--
-	}
-	lenRealm := readInt32(b, p, e)
-	princ.Realm = string(readBytes(b, p, int(lenRealm), e))
-	for i := 0; i < nc; i++ {
-		l := readInt32(b, p, e)
-		princ.PrincipalName.NameString = append(princ.PrincipalName.NameString, string(readBytes(b, p, int(l), e)))
-	}
-	return princ
-}
-
-func parseCredential(b []byte, p *int, c *CCache, e *binary.ByteOrder) (cred *Credential, err error) {
-	cred = new(Credential)
-	cred.Client = parsePrincipal(b, p, c, e)
-	cred.Server = parsePrincipal(b, p, c, e)
-	key := types.EncryptionKey{}
-	key.KeyType = int32(readInt16(b, p, e))
-	if c.Version == 3 {
-		//repeated twice in version 3
-		key.KeyType = int32(readInt16(b, p, e))
-	}
-	key.KeyValue = readData(b, p, e)
-	cred.Key = key
-	cred.AuthTime = readTimestamp(b, p, e)
-	cred.StartTime = readTimestamp(b, p, e)
-	cred.EndTime = readTimestamp(b, p, e)
-	cred.RenewTill = readTimestamp(b, p, e)
-	if ik := readInt8(b, p, e); ik == 0 {
-		cred.IsSKey = false
-	} else {
-		cred.IsSKey = true
-	}
-	cred.TicketFlags = types.NewKrbFlags()
-	cred.TicketFlags.Bytes = readBytes(b, p, 4, e)
-	l := int(readInt32(b, p, e))
-	cred.Addresses = make([]types.HostAddress, l, l)
-	for i := range cred.Addresses {
-		cred.Addresses[i] = readAddress(b, p, e)
-	}
-	l = int(readInt32(b, p, e))
-	cred.AuthData = make([]types.AuthorizationDataEntry, l, l)
-	for i := range cred.AuthData {
-		cred.AuthData[i] = readAuthDataEntry(b, p, e)
-	}
-	cred.Ticket = readData(b, p, e)
-	cred.SecondTicket = readData(b, p, e)
-	return
-}
-
-// GetClientPrincipalName returns a PrincipalName type for the client the credentials cache is for.
-func (c *CCache) GetClientPrincipalName() types.PrincipalName {
-	return c.DefaultPrincipal.PrincipalName
-}
-
-// GetClientRealm returns the reals of the client the credentials cache is for.
-func (c *CCache) GetClientRealm() string {
-	return c.DefaultPrincipal.Realm
-}
-
-// GetClientCredentials returns a Credentials object representing the client of the credentials cache.
-func (c *CCache) GetClientCredentials() *Credentials {
-	return &Credentials{
-		username: c.DefaultPrincipal.PrincipalName.PrincipalNameString(),
-		realm:    c.GetClientRealm(),
-		cname:    c.DefaultPrincipal.PrincipalName,
-	}
-}
-
-// Contains tests if the cache contains a credential for the provided server PrincipalName
-func (c *CCache) Contains(p types.PrincipalName) bool {
-	for _, cred := range c.Credentials {
-		if cred.Server.PrincipalName.Equal(p) {
-			return true
-		}
-	}
-	return false
-}
-
-// GetEntry returns a specific credential for the PrincipalName provided.
-func (c *CCache) GetEntry(p types.PrincipalName) (*Credential, bool) {
-	cred := new(Credential)
-	var found bool
-	for i := range c.Credentials {
-		if c.Credentials[i].Server.PrincipalName.Equal(p) {
-			cred = c.Credentials[i]
-			found = true
-			break
-		}
-	}
-	if !found {
-		return cred, false
-	}
-	return cred, true
-}
-
-// GetEntries filters out configuration entries an returns a slice of credentials.
-func (c *CCache) GetEntries() []*Credential {
-	creds := make([]*Credential, 0)
-	for _, cred := range c.Credentials {
-		// Filter out configuration entries
-		if strings.HasPrefix(cred.Server.Realm, "X-CACHECONF") {
-			continue
-		}
-		creds = append(creds, cred)
-	}
-	return creds
-}
-
-func (h *headerField) valid() bool {
-	// At this time there is only one defined header field.
-	// Its tag value is 1, its length is always 8.
-	// Its contents are two 32-bit integers giving the seconds and microseconds
-	// of the time offset of the KDC relative to the client.
-	// Adding this offset to the current time on the client should give the current time on the KDC, if that offset has not changed since the initial authentication.
-
-	// Done as a switch in case other tag values are added in the future.
-	switch h.tag {
-	case headerFieldTagKDCOffset:
-		if h.length != 8 || len(h.value) != 8 {
-			return false
-		}
-		return true
-	}
-	return false
-}
-
-func readData(b []byte, p *int, e *binary.ByteOrder) []byte {
-	l := readInt32(b, p, e)
-	return readBytes(b, p, int(l), e)
-}
-
-func readAddress(b []byte, p *int, e *binary.ByteOrder) types.HostAddress {
-	a := types.HostAddress{}
-	a.AddrType = int32(readInt16(b, p, e))
-	a.Address = readData(b, p, e)
-	return a
-}
-
-func readAuthDataEntry(b []byte, p *int, e *binary.ByteOrder) types.AuthorizationDataEntry {
-	a := types.AuthorizationDataEntry{}
-	a.ADType = int32(readInt16(b, p, e))
-	a.ADData = readData(b, p, e)
-	return a
-}
-
-// Read bytes representing a timestamp.
-func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time {
-	return time.Unix(int64(readInt32(b, p, e)), 0)
-}
-
-// Read bytes representing an eight bit integer.
-func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) {
-	buf := bytes.NewBuffer(b[*p : *p+1])
-	binary.Read(buf, *e, &i)
-	*p++
-	return
-}
-
-// Read bytes representing a sixteen bit integer.
-func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) {
-	buf := bytes.NewBuffer(b[*p : *p+2])
-	binary.Read(buf, *e, &i)
-	*p += 2
-	return
-}
-
-// Read bytes representing a thirty two bit integer.
-func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) {
-	buf := bytes.NewBuffer(b[*p : *p+4])
-	binary.Read(buf, *e, &i)
-	*p += 4
-	return
-}
-
-func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte {
-	buf := bytes.NewBuffer(b[*p : *p+s])
-	r := make([]byte, s)
-	binary.Read(buf, *e, &r)
-	*p += s
-	return r
-}
-
-func isNativeEndianLittle() bool {
-	var x = 0x012345678
-	var p = unsafe.Pointer(&x)
-	var bp = (*[4]byte)(p)
-
-	var endian bool
-	if 0x01 == bp[0] {
-		endian = false
-	} else if (0x78 & 0xff) == (bp[0] & 0xff) {
-		endian = true
-	} else {
-		// Default to big endian
-		endian = false
-	}
-	return endian
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go
deleted file mode 100644
index 62acab7..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/credentials/credentials.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Package credentials provides credentials management for Kerberos 5 authentication.
-package credentials
-
-import (
-	"time"
-
-	"github.com/hashicorp/go-uuid"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
-	"gopkg.in/jcmturner/gokrb5.v7/keytab"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-const (
-	// AttributeKeyADCredentials assigned number for AD credentials.
-	AttributeKeyADCredentials = "gokrb5AttributeKeyADCredentials"
-)
-
-// Credentials struct for a user.
-// Contains either a keytab, password or both.
-// Keytabs are used over passwords if both are defined.
-type Credentials struct {
-	username    string
-	displayName string
-	realm       string
-	cname       types.PrincipalName
-	keytab      *keytab.Keytab
-	password    string
-	attributes  map[string]interface{}
-	validUntil  time.Time
-
-	authenticated   bool
-	human           bool
-	authTime        time.Time
-	groupMembership map[string]bool
-	sessionID       string
-}
-
-// ADCredentials contains information obtained from the PAC.
-type ADCredentials struct {
-	EffectiveName       string
-	FullName            string
-	UserID              int
-	PrimaryGroupID      int
-	LogOnTime           time.Time
-	LogOffTime          time.Time
-	PasswordLastSet     time.Time
-	GroupMembershipSIDs []string
-	LogonDomainName     string
-	LogonDomainID       string
-	LogonServer         string
-}
-
-// New creates a new Credentials instance.
-func New(username string, realm string) *Credentials {
-	uid, err := uuid.GenerateUUID()
-	if err != nil {
-		uid = "00unique-sess-ions-uuid-unavailable0"
-	}
-	return &Credentials{
-		username:        username,
-		displayName:     username,
-		realm:           realm,
-		cname:           types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, username),
-		keytab:          keytab.New(),
-		attributes:      make(map[string]interface{}),
-		groupMembership: make(map[string]bool),
-		sessionID:       uid,
-		human:           true,
-	}
-}
-
-// NewFromPrincipalName creates a new Credentials instance with the user details provides as a PrincipalName type.
-func NewFromPrincipalName(cname types.PrincipalName, realm string) *Credentials {
-	uid, err := uuid.GenerateUUID()
-	if err != nil {
-		uid = "00unique-sess-ions-uuid-unavailable0"
-	}
-	return &Credentials{
-		username:        cname.PrincipalNameString(),
-		displayName:     cname.PrincipalNameString(),
-		realm:           realm,
-		cname:           cname,
-		keytab:          keytab.New(),
-		attributes:      make(map[string]interface{}),
-		groupMembership: make(map[string]bool),
-		sessionID:       uid,
-		human:           true,
-	}
-}
-
-// WithKeytab sets the Keytab in the Credentials struct.
-func (c *Credentials) WithKeytab(kt *keytab.Keytab) *Credentials {
-	c.keytab = kt
-	c.password = ""
-	return c
-}
-
-// Keytab returns the credential's Keytab.
-func (c *Credentials) Keytab() *keytab.Keytab {
-	return c.keytab
-}
-
-// HasKeytab queries if the Credentials has a keytab defined.
-func (c *Credentials) HasKeytab() bool {
-	if c.keytab != nil && len(c.keytab.Entries) > 0 {
-		return true
-	}
-	return false
-}
-
-// WithPassword sets the password in the Credentials struct.
-func (c *Credentials) WithPassword(password string) *Credentials {
-	c.password = password
-	c.keytab = keytab.New() // clear any keytab
-	return c
-}
-
-// Password returns the credential's password.
-func (c *Credentials) Password() string {
-	return c.password
-}
-
-// HasPassword queries if the Credentials has a password defined.
-func (c *Credentials) HasPassword() bool {
-	if c.password != "" {
-		return true
-	}
-	return false
-}
-
-// SetValidUntil sets the expiry time of the credentials
-func (c *Credentials) SetValidUntil(t time.Time) {
-	c.validUntil = t
-}
-
-// SetADCredentials adds ADCredentials attributes to the credentials
-func (c *Credentials) SetADCredentials(a ADCredentials) {
-	c.SetAttribute(AttributeKeyADCredentials, a)
-	if a.FullName != "" {
-		c.SetDisplayName(a.FullName)
-	}
-	if a.EffectiveName != "" {
-		c.SetUserName(a.EffectiveName)
-	}
-	for i := range a.GroupMembershipSIDs {
-		c.AddAuthzAttribute(a.GroupMembershipSIDs[i])
-	}
-}
-
-// Methods to implement goidentity.Identity interface
-
-// UserName returns the credential's username.
-func (c *Credentials) UserName() string {
-	return c.username
-}
-
-// SetUserName sets the username value on the credential.
-func (c *Credentials) SetUserName(s string) {
-	c.username = s
-}
-
-// CName returns the credential's client principal name.
-func (c *Credentials) CName() types.PrincipalName {
-	return c.cname
-}
-
-// SetCName sets the client principal name on the credential.
-func (c *Credentials) SetCName(pn types.PrincipalName) {
-	c.cname = pn
-}
-
-// Domain returns the credential's domain.
-func (c *Credentials) Domain() string {
-	return c.realm
-}
-
-// SetDomain sets the domain value on the credential.
-func (c *Credentials) SetDomain(s string) {
-	c.realm = s
-}
-
-// Realm returns the credential's realm. Same as the domain.
-func (c *Credentials) Realm() string {
-	return c.Domain()
-}
-
-// SetRealm sets the realm value on the credential. Same as the domain
-func (c *Credentials) SetRealm(s string) {
-	c.SetDomain(s)
-}
-
-// DisplayName returns the credential's display name.
-func (c *Credentials) DisplayName() string {
-	return c.displayName
-}
-
-// SetDisplayName sets the display name value on the credential.
-func (c *Credentials) SetDisplayName(s string) {
-	c.displayName = s
-}
-
-// Human returns if the  credential represents a human or not.
-func (c *Credentials) Human() bool {
-	return c.human
-}
-
-// SetHuman sets the credential as human.
-func (c *Credentials) SetHuman(b bool) {
-	c.human = b
-}
-
-// AuthTime returns the time the credential was authenticated.
-func (c *Credentials) AuthTime() time.Time {
-	return c.authTime
-}
-
-// SetAuthTime sets the time the credential was authenticated.
-func (c *Credentials) SetAuthTime(t time.Time) {
-	c.authTime = t
-}
-
-// AuthzAttributes returns the credentials authorizing attributes.
-func (c *Credentials) AuthzAttributes() []string {
-	s := make([]string, len(c.groupMembership))
-	i := 0
-	for a := range c.groupMembership {
-		s[i] = a
-		i++
-	}
-	return s
-}
-
-// Authenticated indicates if the credential has been successfully authenticated or not.
-func (c *Credentials) Authenticated() bool {
-	return c.authenticated
-}
-
-// SetAuthenticated sets the credential as having been successfully authenticated.
-func (c *Credentials) SetAuthenticated(b bool) {
-	c.authenticated = b
-}
-
-// AddAuthzAttribute adds an authorization attribute to the credential.
-func (c *Credentials) AddAuthzAttribute(a string) {
-	c.groupMembership[a] = true
-}
-
-// RemoveAuthzAttribute removes an authorization attribute from the credential.
-func (c *Credentials) RemoveAuthzAttribute(a string) {
-	if _, ok := c.groupMembership[a]; !ok {
-		return
-	}
-	delete(c.groupMembership, a)
-}
-
-// EnableAuthzAttribute toggles an authorization attribute to an enabled state on the credential.
-func (c *Credentials) EnableAuthzAttribute(a string) {
-	if enabled, ok := c.groupMembership[a]; ok && !enabled {
-		c.groupMembership[a] = true
-	}
-}
-
-// DisableAuthzAttribute toggles an authorization attribute to a disabled state on the credential.
-func (c *Credentials) DisableAuthzAttribute(a string) {
-	if enabled, ok := c.groupMembership[a]; ok && enabled {
-		c.groupMembership[a] = false
-	}
-}
-
-// Authorized indicates if the credential has the specified authorizing attribute.
-func (c *Credentials) Authorized(a string) bool {
-	if enabled, ok := c.groupMembership[a]; ok && enabled {
-		return true
-	}
-	return false
-}
-
-// SessionID returns the credential's session ID.
-func (c *Credentials) SessionID() string {
-	return c.sessionID
-}
-
-// Expired indicates if the credential has expired.
-func (c *Credentials) Expired() bool {
-	if !c.validUntil.IsZero() && time.Now().UTC().After(c.validUntil) {
-		return true
-	}
-	return false
-}
-
-// Attributes returns the Credentials' attributes map.
-func (c *Credentials) Attributes() map[string]interface{} {
-	return c.attributes
-}
-
-// SetAttribute sets the value of an attribute.
-func (c *Credentials) SetAttribute(k string, v interface{}) {
-	c.attributes[k] = v
-}
-
-// SetAttributes replaces the attributes map with the one provided.
-func (c *Credentials) SetAttributes(a map[string]interface{}) {
-	c.attributes = a
-}
-
-// RemoveAttribute deletes an attribute from the attribute map that has the key provided.
-func (c *Credentials) RemoveAttribute(k string) {
-	delete(c.attributes, k)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go
deleted file mode 100644
index 90b5df0..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha1-96.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package crypto
-
-import (
-	"crypto/aes"
-	"crypto/hmac"
-	"crypto/sha1"
-	"hash"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-// RFC 3962
-//+--------------------------------------------------------------------+
-//|               protocol key format        128- or 256-bit string    |
-//|                                                                    |
-//|            string-to-key function        PBKDF2+DK with variable   |
-//|                                          iteration count (see      |
-//|                                          above)                    |
-//|                                                                    |
-//|  default string-to-key parameters        00 00 10 00               |
-//|                                                                    |
-//|        key-generation seed length        key size                  |
-//|                                                                    |
-//|            random-to-key function        identity function         |
-//|                                                                    |
-//|                  hash function, H        SHA-1                     |
-//|                                                                    |
-//|               HMAC output size, h        12 octets (96 bits)       |
-//|                                                                    |
-//|             message block size, m        1 octet                   |
-//|                                                                    |
-//|  encryption/decryption functions,        AES in CBC-CTS mode       |
-//|  E and D                                 (cipher block size 16     |
-//|                                          octets), with next-to-    |
-//|                                          last block (last block    |
-//|                                          if only one) as CBC-style |
-//|                                          ivec                      |
-//+--------------------------------------------------------------------+
-//
-//+--------------------------------------------------------------------+
-//|                         encryption types                           |
-//+--------------------------------------------------------------------+
-//|         type name                  etype value          key size   |
-//+--------------------------------------------------------------------+
-//|   aes128-cts-hmac-sha1-96              17                 128      |
-//|   aes256-cts-hmac-sha1-96              18                 256      |
-//+--------------------------------------------------------------------+
-//
-//+--------------------------------------------------------------------+
-//|                          checksum types                            |
-//+--------------------------------------------------------------------+
-//|        type name                 sumtype value           length    |
-//+--------------------------------------------------------------------+
-//|    hmac-sha1-96-aes128                15                   96      |
-//|    hmac-sha1-96-aes256                16                   96      |
-//+--------------------------------------------------------------------+
-
-// Aes128CtsHmacSha96 implements Kerberos encryption type aes128-cts-hmac-sha1-96
-type Aes128CtsHmacSha96 struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e Aes128CtsHmacSha96) GetETypeID() int32 {
-	return etypeID.AES128_CTS_HMAC_SHA1_96
-}
-
-// GetHashID returns the checksum type ID number.
-func (e Aes128CtsHmacSha96) GetHashID() int32 {
-	return chksumtype.HMAC_SHA1_96_AES128
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e Aes128CtsHmacSha96) GetKeyByteSize() int {
-	return 128 / 8
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e Aes128CtsHmacSha96) GetKeySeedBitLength() int {
-	return e.GetKeyByteSize() * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e Aes128CtsHmacSha96) GetHashFunc() func() hash.Hash {
-	return sha1.New
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e Aes128CtsHmacSha96) GetMessageBlockByteSize() int {
-	return 1
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e Aes128CtsHmacSha96) GetDefaultStringToKeyParams() string {
-	return "00001000"
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
-func (e Aes128CtsHmacSha96) GetConfounderByteSize() int {
-	return aes.BlockSize
-}
-
-// GetHMACBitLength returns the bit count size of the integrity hash.
-func (e Aes128CtsHmacSha96) GetHMACBitLength() int {
-	return 96
-}
-
-// GetCypherBlockBitLength returns the bit count size of the cypher block.
-func (e Aes128CtsHmacSha96) GetCypherBlockBitLength() int {
-	return aes.BlockSize * 8
-}
-
-// StringToKey returns a key derived from the string provided.
-func (e Aes128CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
-	return rfc3962.StringToKey(secret, salt, s2kparams, e)
-}
-
-// RandomToKey returns a key from the bytes provided.
-func (e Aes128CtsHmacSha96) RandomToKey(b []byte) []byte {
-	return rfc3961.RandomToKey(b)
-}
-
-// EncryptData encrypts the data provided.
-func (e Aes128CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) {
-	return rfc3962.EncryptData(key, data, e)
-}
-
-// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
-func (e Aes128CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc3962.EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Aes128CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc3962.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Aes128CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc3962.DecryptMessage(key, ciphertext, usage, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Aes128CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc3961.DeriveKey(protocolKey, usage, e)
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Aes128CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc3961.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the plaintext message.
-func (e Aes128CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Aes128CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided.
-func (e Aes128CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	c, err := e.GetChecksumHash(protocolKey, data, usage)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(chksum, c)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go
deleted file mode 100644
index 49a1b07..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes128-cts-hmac-sha256-128.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package crypto
-
-import (
-	"crypto/aes"
-	"crypto/hmac"
-	"crypto/sha256"
-	"hash"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-// RFC https://tools.ietf.org/html/rfc8009
-
-// Aes128CtsHmacSha256128 implements Kerberos encryption type aes128-cts-hmac-sha256-128
-type Aes128CtsHmacSha256128 struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e Aes128CtsHmacSha256128) GetETypeID() int32 {
-	return etypeID.AES128_CTS_HMAC_SHA256_128
-}
-
-// GetHashID returns the checksum type ID number.
-func (e Aes128CtsHmacSha256128) GetHashID() int32 {
-	return chksumtype.HMAC_SHA256_128_AES128
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e Aes128CtsHmacSha256128) GetKeyByteSize() int {
-	return 128 / 8
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e Aes128CtsHmacSha256128) GetKeySeedBitLength() int {
-	return e.GetKeyByteSize() * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash {
-	return sha256.New
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e Aes128CtsHmacSha256128) GetMessageBlockByteSize() int {
-	return 1
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e Aes128CtsHmacSha256128) GetDefaultStringToKeyParams() string {
-	return "00008000"
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
-func (e Aes128CtsHmacSha256128) GetConfounderByteSize() int {
-	return aes.BlockSize
-}
-
-// GetHMACBitLength returns the bit count size of the integrity hash.
-func (e Aes128CtsHmacSha256128) GetHMACBitLength() int {
-	return 128
-}
-
-// GetCypherBlockBitLength returns the bit count size of the cypher block.
-func (e Aes128CtsHmacSha256128) GetCypherBlockBitLength() int {
-	return aes.BlockSize * 8
-}
-
-// StringToKey returns a key derived from the string provided.
-func (e Aes128CtsHmacSha256128) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
-	saltp := rfc8009.GetSaltP(salt, "aes128-cts-hmac-sha256-128")
-	return rfc8009.StringToKey(secret, saltp, s2kparams, e)
-}
-
-// RandomToKey returns a key from the bytes provided.
-func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte {
-	return rfc8009.RandomToKey(b)
-}
-
-// EncryptData encrypts the data provided.
-func (e Aes128CtsHmacSha256128) EncryptData(key, data []byte) ([]byte, []byte, error) {
-	return rfc8009.EncryptData(key, data, e)
-}
-
-// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
-func (e Aes128CtsHmacSha256128) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc8009.EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc8009.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Aes128CtsHmacSha256128) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc8009.DecryptMessage(key, ciphertext, usage, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Aes128CtsHmacSha256128) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveKey(protocolKey, usage, e), nil
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Aes128CtsHmacSha256128) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the ciphertext message.
-// The HMAC is calculated over the cipher state concatenated with the
-// AES output, instead of being calculated over the confounder and
-// plaintext.  This allows the message receiver to verify the
-// integrity of the message before decrypting the message.
-// Therefore the pt value to this interface method is not use. Pass any []byte.
-func (e Aes128CtsHmacSha256128) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	// We don't need ib just there for the interface
-	return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Aes128CtsHmacSha256128) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided.
-func (e Aes128CtsHmacSha256128) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	c, err := e.GetChecksumHash(protocolKey, data, usage)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(chksum, c)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go
deleted file mode 100644
index 0cdbb7e..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha1-96.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package crypto
-
-import (
-	"crypto/aes"
-	"crypto/hmac"
-	"crypto/sha1"
-	"hash"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-// RFC 3962
-//+--------------------------------------------------------------------+
-//|               protocol key format        128- or 256-bit string    |
-//|                                                                    |
-//|            string-to-key function        PBKDF2+DK with variable   |
-//|                                          iteration count (see      |
-//|                                          above)                    |
-//|                                                                    |
-//|  default string-to-key parameters        00 00 10 00               |
-//|                                                                    |
-//|        key-generation seed length        key size                  |
-//|                                                                    |
-//|            random-to-key function        identity function         |
-//|                                                                    |
-//|                  hash function, H        SHA-1                     |
-//|                                                                    |
-//|               HMAC output size, h        12 octets (96 bits)       |
-//|                                                                    |
-//|             message block size, m        1 octet                   |
-//|                                                                    |
-//|  encryption/decryption functions,        AES in CBC-CTS mode       |
-//|  E and D                                 (cipher block size 16     |
-//|                                          octets), with next-to-    |
-//|                                          last block (last block    |
-//|                                          if only one) as CBC-style |
-//|                                          ivec                      |
-//+--------------------------------------------------------------------+
-//
-//+--------------------------------------------------------------------+
-//|                         encryption types                           |
-//+--------------------------------------------------------------------+
-//|         type name                  etype value          key size   |
-//+--------------------------------------------------------------------+
-//|   aes128-cts-hmac-sha1-96              17                 128      |
-//|   aes256-cts-hmac-sha1-96              18                 256      |
-//+--------------------------------------------------------------------+
-//
-//+--------------------------------------------------------------------+
-//|                          checksum types                            |
-//+--------------------------------------------------------------------+
-//|        type name                 sumtype value           length    |
-//+--------------------------------------------------------------------+
-//|    hmac-sha1-96-aes128                15                   96      |
-//|    hmac-sha1-96-aes256                16                   96      |
-//+--------------------------------------------------------------------+
-
-// Aes256CtsHmacSha96 implements Kerberos encryption type aes256-cts-hmac-sha1-96
-type Aes256CtsHmacSha96 struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e Aes256CtsHmacSha96) GetETypeID() int32 {
-	return etypeID.AES256_CTS_HMAC_SHA1_96
-}
-
-// GetHashID returns the checksum type ID number.
-func (e Aes256CtsHmacSha96) GetHashID() int32 {
-	return chksumtype.HMAC_SHA1_96_AES256
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e Aes256CtsHmacSha96) GetKeyByteSize() int {
-	return 256 / 8
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e Aes256CtsHmacSha96) GetKeySeedBitLength() int {
-	return e.GetKeyByteSize() * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e Aes256CtsHmacSha96) GetHashFunc() func() hash.Hash {
-	return sha1.New
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e Aes256CtsHmacSha96) GetMessageBlockByteSize() int {
-	return 1
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e Aes256CtsHmacSha96) GetDefaultStringToKeyParams() string {
-	return "00001000"
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
-func (e Aes256CtsHmacSha96) GetConfounderByteSize() int {
-	return aes.BlockSize
-}
-
-// GetHMACBitLength returns the bit count size of the integrity hash.
-func (e Aes256CtsHmacSha96) GetHMACBitLength() int {
-	return 96
-}
-
-// GetCypherBlockBitLength returns the bit count size of the cypher block.
-func (e Aes256CtsHmacSha96) GetCypherBlockBitLength() int {
-	return aes.BlockSize * 8
-}
-
-// StringToKey returns a key derived from the string provided.
-func (e Aes256CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
-	return rfc3962.StringToKey(secret, salt, s2kparams, e)
-}
-
-// RandomToKey returns a key from the bytes provided.
-func (e Aes256CtsHmacSha96) RandomToKey(b []byte) []byte {
-	return rfc3961.RandomToKey(b)
-}
-
-// EncryptData encrypts the data provided.
-func (e Aes256CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) {
-	return rfc3962.EncryptData(key, data, e)
-}
-
-// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
-func (e Aes256CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc3962.EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Aes256CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc3962.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Aes256CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc3962.DecryptMessage(key, ciphertext, usage, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Aes256CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc3961.DeriveKey(protocolKey, usage, e)
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Aes256CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc3961.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the plaintext message.
-func (e Aes256CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Aes256CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided.
-func (e Aes256CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	c, err := e.GetChecksumHash(protocolKey, data, usage)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(chksum, c)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go
deleted file mode 100644
index 562b078..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/aes256-cts-hmac-sha384-192.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package crypto
-
-import (
-	"crypto/aes"
-	"crypto/hmac"
-	"crypto/sha512"
-	"hash"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-// RFC https://tools.ietf.org/html/rfc8009
-
-// Aes256CtsHmacSha384192 implements Kerberos encryption type aes256-cts-hmac-sha384-192
-type Aes256CtsHmacSha384192 struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e Aes256CtsHmacSha384192) GetETypeID() int32 {
-	return etypeID.AES256_CTS_HMAC_SHA384_192
-}
-
-// GetHashID returns the checksum type ID number.
-func (e Aes256CtsHmacSha384192) GetHashID() int32 {
-	return chksumtype.HMAC_SHA384_192_AES256
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e Aes256CtsHmacSha384192) GetKeyByteSize() int {
-	return 192 / 8
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e Aes256CtsHmacSha384192) GetKeySeedBitLength() int {
-	return e.GetKeyByteSize() * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e Aes256CtsHmacSha384192) GetHashFunc() func() hash.Hash {
-	return sha512.New384
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e Aes256CtsHmacSha384192) GetMessageBlockByteSize() int {
-	return 1
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e Aes256CtsHmacSha384192) GetDefaultStringToKeyParams() string {
-	return "00008000"
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
-func (e Aes256CtsHmacSha384192) GetConfounderByteSize() int {
-	return aes.BlockSize
-}
-
-// GetHMACBitLength returns the bit count size of the integrity hash.
-func (e Aes256CtsHmacSha384192) GetHMACBitLength() int {
-	return 192
-}
-
-// GetCypherBlockBitLength returns the bit count size of the cypher block.
-func (e Aes256CtsHmacSha384192) GetCypherBlockBitLength() int {
-	return aes.BlockSize * 8
-}
-
-// StringToKey returns a key derived from the string provided.
-func (e Aes256CtsHmacSha384192) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
-	saltp := rfc8009.GetSaltP(salt, "aes256-cts-hmac-sha384-192")
-	return rfc8009.StringToKey(secret, saltp, s2kparams, e)
-}
-
-// RandomToKey returns a key from the bytes provided.
-func (e Aes256CtsHmacSha384192) RandomToKey(b []byte) []byte {
-	return rfc8009.RandomToKey(b)
-}
-
-// EncryptData encrypts the data provided.
-func (e Aes256CtsHmacSha384192) EncryptData(key, data []byte) ([]byte, []byte, error) {
-	return rfc8009.EncryptData(key, data, e)
-}
-
-// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
-func (e Aes256CtsHmacSha384192) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc8009.EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Aes256CtsHmacSha384192) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc8009.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Aes256CtsHmacSha384192) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc8009.DecryptMessage(key, ciphertext, usage, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Aes256CtsHmacSha384192) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveKey(protocolKey, usage, e), nil
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Aes256CtsHmacSha384192) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the ciphertext message.
-// The HMAC is calculated over the cipher state concatenated with the
-// AES output, instead of being calculated over the confounder and
-// plaintext.  This allows the message receiver to verify the
-// integrity of the message before decrypting the message.
-// Therefore the pt value to this interface method is not use. Pass any []byte.
-func (e Aes256CtsHmacSha384192) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	// We don't need ib just there for the interface
-	return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Aes256CtsHmacSha384192) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided.
-func (e Aes256CtsHmacSha384192) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	c, err := e.GetChecksumHash(protocolKey, data, usage)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(chksum, c)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go
deleted file mode 100644
index 96ae549..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/common/common.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Package common provides encryption methods common across encryption types
-package common
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-)
-
-// ZeroPad pads bytes with zeros to nearest multiple of message size m.
-func ZeroPad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("Invalid message block size when padding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("Data not valid to pad: Zero size")
-	}
-	if l := len(b) % m; l != 0 {
-		n := m - l
-		z := make([]byte, n)
-		b = append(b, z...)
-	}
-	return b, nil
-}
-
-// PKCS7Pad pads bytes according to RFC 2315 to nearest multiple of message size m.
-func PKCS7Pad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("Invalid message block size when padding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("Data not valid to pad: Zero size")
-	}
-	n := m - (len(b) % m)
-	pb := make([]byte, len(b)+n)
-	copy(pb, b)
-	copy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n))
-	return pb, nil
-}
-
-// PKCS7Unpad removes RFC 2315 padding from byes where message size is m.
-func PKCS7Unpad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("invalid message block size when unpadding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("padded data not valid: Zero size")
-	}
-	if len(b)%m != 0 {
-		return nil, errors.New("padded data not valid: Not multiple of message block size")
-	}
-	c := b[len(b)-1]
-	n := int(c)
-	if n == 0 || n > len(b) {
-		return nil, errors.New("padded data not valid: Data may not have been padded")
-	}
-	for i := 0; i < n; i++ {
-		if b[len(b)-n+i] != c {
-			return nil, errors.New("padded data not valid")
-		}
-	}
-	return b[:len(b)-n], nil
-}
-
-// GetHash generates the keyed hash value according to the etype's hash function.
-func GetHash(pt, key []byte, usage []byte, etype etype.EType) ([]byte, error) {
-	k, err := etype.DeriveKey(key, usage)
-	if err != nil {
-		return nil, fmt.Errorf("unable to derive key for checksum: %v", err)
-	}
-	mac := hmac.New(etype.GetHashFunc(), k)
-	p := make([]byte, len(pt))
-	copy(p, pt)
-	mac.Write(p)
-	return mac.Sum(nil)[:etype.GetHMACBitLength()/8], nil
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func GetChecksumHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) {
-	return GetHash(b, key, GetUsageKc(usage), etype)
-}
-
-// GetIntegrityHash returns a keyed integrity hash of the bytes provided.
-func GetIntegrityHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) {
-	return GetHash(b, key, GetUsageKi(usage), etype)
-}
-
-// VerifyChecksum compares the checksum of the msg bytes is the same as the checksum provided.
-func VerifyChecksum(key, chksum, msg []byte, usage uint32, etype etype.EType) bool {
-	//The ciphertext output is the concatenation of the output of the basic
-	//encryption function E and a (possibly truncated) HMAC using the
-	//specified hash function H, both applied to the plaintext with a
-	//random confounder prefix and sufficient padding to bring it to a
-	//multiple of the message block size.  When the HMAC is computed, the
-	//key is used in the protocol key form.
-	expectedMAC, _ := GetChecksumHash(msg, key, usage, etype)
-	return hmac.Equal(chksum, expectedMAC)
-}
-
-// GetUsageKc returns the checksum key usage value for the usage number un.
-//
-// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below.
-//
-// Kc = DK(base-key, usage | 0x99);
-func GetUsageKc(un uint32) []byte {
-	return getUsage(un, 0x99)
-}
-
-// GetUsageKe returns the encryption key usage value for the usage number un
-//
-// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below.
-//
-// Ke = DK(base-key, usage | 0xAA);
-func GetUsageKe(un uint32) []byte {
-	return getUsage(un, 0xAA)
-}
-
-// GetUsageKi returns the integrity key usage value for the usage number un
-//
-// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below.
-//
-// Ki = DK(base-key, usage | 0x55);
-func GetUsageKi(un uint32) []byte {
-	return getUsage(un, 0x55)
-}
-
-func getUsage(un uint32, o byte) []byte {
-	var buf bytes.Buffer
-	binary.Write(&buf, binary.BigEndian, un)
-	return append(buf.Bytes(), o)
-}
-
-// IterationsToS2Kparams converts the number of iterations as an integer to a string representation.
-func IterationsToS2Kparams(i uint32) string {
-	b := make([]byte, 4, 4)
-	binary.BigEndian.PutUint32(b, i)
-	return hex.EncodeToString(b)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go
deleted file mode 100644
index e04e968..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/crypto.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Package crypto implements cryptographic functions for Kerberos 5 implementation.
-package crypto
-
-import (
-	"encoding/hex"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/patype"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// GetEtype returns an instances of the required etype struct for the etype ID.
-func GetEtype(id int32) (etype.EType, error) {
-	switch id {
-	case etypeID.AES128_CTS_HMAC_SHA1_96:
-		var et Aes128CtsHmacSha96
-		return et, nil
-	case etypeID.AES256_CTS_HMAC_SHA1_96:
-		var et Aes256CtsHmacSha96
-		return et, nil
-	case etypeID.AES128_CTS_HMAC_SHA256_128:
-		var et Aes128CtsHmacSha256128
-		return et, nil
-	case etypeID.AES256_CTS_HMAC_SHA384_192:
-		var et Aes256CtsHmacSha384192
-		return et, nil
-	case etypeID.DES3_CBC_SHA1_KD:
-		var et Des3CbcSha1Kd
-		return et, nil
-	case etypeID.RC4_HMAC:
-		var et RC4HMAC
-		return et, nil
-	default:
-		return nil, fmt.Errorf("unknown or unsupported EType: %d", id)
-	}
-}
-
-// GetChksumEtype returns an instances of the required etype struct for the checksum ID.
-func GetChksumEtype(id int32) (etype.EType, error) {
-	switch id {
-	case chksumtype.HMAC_SHA1_96_AES128:
-		var et Aes128CtsHmacSha96
-		return et, nil
-	case chksumtype.HMAC_SHA1_96_AES256:
-		var et Aes256CtsHmacSha96
-		return et, nil
-	case chksumtype.HMAC_SHA256_128_AES128:
-		var et Aes128CtsHmacSha256128
-		return et, nil
-	case chksumtype.HMAC_SHA384_192_AES256:
-		var et Aes256CtsHmacSha384192
-		return et, nil
-	case chksumtype.HMAC_SHA1_DES3_KD:
-		var et Des3CbcSha1Kd
-		return et, nil
-	case chksumtype.KERB_CHECKSUM_HMAC_MD5:
-		var et RC4HMAC
-		return et, nil
-	//case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED:
-	//	var et RC4HMAC
-	//	return et, nil
-	default:
-		return nil, fmt.Errorf("unknown or unsupported checksum type: %d", id)
-	}
-}
-
-// GetKeyFromPassword generates an encryption key from the principal's password.
-func GetKeyFromPassword(passwd string, cname types.PrincipalName, realm string, etypeID int32, pas types.PADataSequence) (types.EncryptionKey, etype.EType, error) {
-	var key types.EncryptionKey
-	et, err := GetEtype(etypeID)
-	if err != nil {
-		return key, et, fmt.Errorf("error getting encryption type: %v", err)
-	}
-	sk2p := et.GetDefaultStringToKeyParams()
-	var salt string
-	var paID int32
-	for _, pa := range pas {
-		switch pa.PADataType {
-		case patype.PA_PW_SALT:
-			if paID > pa.PADataType {
-				continue
-			}
-			salt = string(pa.PADataValue)
-		case patype.PA_ETYPE_INFO:
-			if paID > pa.PADataType {
-				continue
-			}
-			var eti types.ETypeInfo
-			err := eti.Unmarshal(pa.PADataValue)
-			if err != nil {
-				return key, et, fmt.Errorf("error unmashaling PA Data to PA-ETYPE-INFO2: %v", err)
-			}
-			if etypeID != eti[0].EType {
-				et, err = GetEtype(eti[0].EType)
-				if err != nil {
-					return key, et, fmt.Errorf("error getting encryption type: %v", err)
-				}
-			}
-			salt = string(eti[0].Salt)
-		case patype.PA_ETYPE_INFO2:
-			if paID > pa.PADataType {
-				continue
-			}
-			var et2 types.ETypeInfo2
-			err := et2.Unmarshal(pa.PADataValue)
-			if err != nil {
-				return key, et, fmt.Errorf("error unmashalling PA Data to PA-ETYPE-INFO2: %v", err)
-			}
-			if etypeID != et2[0].EType {
-				et, err = GetEtype(et2[0].EType)
-				if err != nil {
-					return key, et, fmt.Errorf("error getting encryption type: %v", err)
-				}
-			}
-			if len(et2[0].S2KParams) == 4 {
-				sk2p = hex.EncodeToString(et2[0].S2KParams)
-			}
-			salt = et2[0].Salt
-		}
-	}
-	if salt == "" {
-		salt = cname.GetSalt(realm)
-	}
-	k, err := et.StringToKey(passwd, salt, sk2p)
-	if err != nil {
-		return key, et, fmt.Errorf("error deriving key from string: %+v", err)
-	}
-	key = types.EncryptionKey{
-		KeyType:  etypeID,
-		KeyValue: k,
-	}
-	return key, et, nil
-}
-
-// GetEncryptedData encrypts the data provided and returns and EncryptedData type.
-// Pass a usage value of zero to use the key provided directly rather than deriving one.
-func GetEncryptedData(plainBytes []byte, key types.EncryptionKey, usage uint32, kvno int) (types.EncryptedData, error) {
-	var ed types.EncryptedData
-	et, err := GetEtype(key.KeyType)
-	if err != nil {
-		return ed, fmt.Errorf("error getting etype: %v", err)
-	}
-	_, b, err := et.EncryptMessage(key.KeyValue, plainBytes, usage)
-	if err != nil {
-		return ed, err
-	}
-
-	ed = types.EncryptedData{
-		EType:  key.KeyType,
-		Cipher: b,
-		KVNO:   kvno,
-	}
-	return ed, nil
-}
-
-// DecryptEncPart decrypts the EncryptedData.
-func DecryptEncPart(ed types.EncryptedData, key types.EncryptionKey, usage uint32) ([]byte, error) {
-	return DecryptMessage(ed.Cipher, key, usage)
-}
-
-// DecryptMessage decrypts the ciphertext and verifies the integrity.
-func DecryptMessage(ciphertext []byte, key types.EncryptionKey, usage uint32) ([]byte, error) {
-	et, err := GetEtype(key.KeyType)
-	if err != nil {
-		return []byte{}, fmt.Errorf("error decrypting: %v", err)
-	}
-	b, err := et.DecryptMessage(key.KeyValue, ciphertext, usage)
-	if err != nil {
-		return nil, fmt.Errorf("error decrypting: %v", err)
-	}
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go
deleted file mode 100644
index db3a149..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/des3-cbc-sha1-kd.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package crypto
-
-import (
-	"crypto/des"
-	"crypto/hmac"
-	"crypto/sha1"
-	"errors"
-	"hash"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-//RFC: 3961 Section 6.3
-
-/*
-                 des3-cbc-hmac-sha1-kd, hmac-sha1-des3-kd
-              ------------------------------------------------
-              protocol key format     24 bytes, parity in low
-                                      bit of each
-
-              key-generation seed     21 bytes
-              length
-
-              hash function           SHA-1
-
-              HMAC output size        160 bits
-
-              message block size      8 bytes
-
-              default string-to-key   empty string
-              params
-
-              encryption and          triple-DES encrypt and
-              decryption functions    decrypt, in outer-CBC
-                                      mode (cipher block size
-                                      8 octets)
-
-              key generation functions:
-
-              random-to-key           DES3random-to-key (see
-                                      below)
-
-              string-to-key           DES3string-to-key (see
-                                      below)
-
-   The des3-cbc-hmac-sha1-kd encryption type is assigned the value
-   sixteen (16).  The hmac-sha1-des3-kd checksum algorithm is assigned a
-   checksum type number of twelve (12)*/
-
-// Des3CbcSha1Kd implements Kerberos encryption type des3-cbc-hmac-sha1-kd
-type Des3CbcSha1Kd struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e Des3CbcSha1Kd) GetETypeID() int32 {
-	return etypeID.DES3_CBC_SHA1_KD
-}
-
-// GetHashID returns the checksum type ID number.
-func (e Des3CbcSha1Kd) GetHashID() int32 {
-	return chksumtype.HMAC_SHA1_DES3_KD
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e Des3CbcSha1Kd) GetKeyByteSize() int {
-	return 24
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e Des3CbcSha1Kd) GetKeySeedBitLength() int {
-	return 21 * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e Des3CbcSha1Kd) GetHashFunc() func() hash.Hash {
-	return sha1.New
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e Des3CbcSha1Kd) GetMessageBlockByteSize() int {
-	//For traditional CBC mode with padding, it would be the underlying cipher's block size
-	return des.BlockSize
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e Des3CbcSha1Kd) GetDefaultStringToKeyParams() string {
-	var s string
-	return s
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
-func (e Des3CbcSha1Kd) GetConfounderByteSize() int {
-	return des.BlockSize
-}
-
-// GetHMACBitLength returns the bit count size of the integrity hash.
-func (e Des3CbcSha1Kd) GetHMACBitLength() int {
-	return e.GetHashFunc()().Size() * 8
-}
-
-// GetCypherBlockBitLength returns the bit count size of the cypher block.
-func (e Des3CbcSha1Kd) GetCypherBlockBitLength() int {
-	return des.BlockSize * 8
-}
-
-// StringToKey returns a key derived from the string provided.
-func (e Des3CbcSha1Kd) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
-	if s2kparams != "" {
-		return []byte{}, errors.New("s2kparams must be an empty string")
-	}
-	return rfc3961.DES3StringToKey(secret, salt, e)
-}
-
-// RandomToKey returns a key from the bytes provided.
-func (e Des3CbcSha1Kd) RandomToKey(b []byte) []byte {
-	return rfc3961.DES3RandomToKey(b)
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Des3CbcSha1Kd) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	r, err := rfc3961.DeriveRandom(protocolKey, usage, e)
-	return r, err
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Des3CbcSha1Kd) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	r, err := e.DeriveRandom(protocolKey, usage)
-	if err != nil {
-		return nil, err
-	}
-	return e.RandomToKey(r), nil
-}
-
-// EncryptData encrypts the data provided.
-func (e Des3CbcSha1Kd) EncryptData(key, data []byte) ([]byte, []byte, error) {
-	return rfc3961.DES3EncryptData(key, data, e)
-}
-
-// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
-func (e Des3CbcSha1Kd) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc3961.DES3EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc3961.DES3DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Des3CbcSha1Kd) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc3961.DES3DecryptMessage(key, ciphertext, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the plaintext message.
-func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Des3CbcSha1Kd) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided.
-func (e Des3CbcSha1Kd) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	c, err := e.GetChecksumHash(protocolKey, data, usage)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(chksum, c)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go
deleted file mode 100644
index ee7510e..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/etype/etype.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Package etype provides the Kerberos Encryption Type interface
-package etype
-
-import "hash"
-
-// EType is the interface defining the Encryption Type.
-type EType interface {
-	GetETypeID() int32
-	GetHashID() int32
-	GetKeyByteSize() int
-	GetKeySeedBitLength() int                                   // key-generation seed length, k
-	GetDefaultStringToKeyParams() string                        // default string-to-key parameters (s2kparams)
-	StringToKey(string, salt, s2kparams string) ([]byte, error) // string-to-key (UTF-8 string, UTF-8 string, opaque)->(protocol-key)
-	RandomToKey(b []byte) []byte                                // random-to-key (bitstring[K])->(protocol-key)
-	GetHMACBitLength() int                                      // HMAC output size, h
-	GetMessageBlockByteSize() int                               // message block size, m
-	EncryptData(key, data []byte) ([]byte, []byte, error)       // E function - encrypt (specific-key, state, octet string)->(state, octet string)
-	EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error)
-	DecryptData(key, data []byte) ([]byte, error) // D function
-	DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error)
-	GetCypherBlockBitLength() int                           // cipher block size, c
-	GetConfounderByteSize() int                             // This is the same as the cipher block size but in bytes.
-	DeriveKey(protocolKey, usage []byte) ([]byte, error)    // DK key-derivation (protocol-key, integer)->(specific-key)
-	DeriveRandom(protocolKey, usage []byte) ([]byte, error) // DR pseudo-random (protocol-key, octet-string)->(octet-string)
-	VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool
-	GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error)
-	VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool
-	GetHashFunc() func() hash.Hash
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go
deleted file mode 100644
index 9df55ee..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rc4-hmac.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package crypto
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"crypto/md5"
-	"hash"
-	"io"
-
-	"golang.org/x/crypto/md4"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-//http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8u40-b25/sun/security/krb5/internal/crypto/dk/ArcFourCrypto.java#ArcFourCrypto.encrypt%28byte%5B%5D%2Cint%2Cbyte%5B%5D%2Cbyte%5B%5D%2Cbyte%5B%5D%2Cint%2Cint%29
-
-// RC4HMAC implements Kerberos encryption type aes256-cts-hmac-sha1-96
-type RC4HMAC struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e RC4HMAC) GetETypeID() int32 {
-	return etypeID.RC4_HMAC
-}
-
-// GetHashID returns the checksum type ID number.
-func (e RC4HMAC) GetHashID() int32 {
-	return chksumtype.KERB_CHECKSUM_HMAC_MD5
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e RC4HMAC) GetKeyByteSize() int {
-	return 16
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e RC4HMAC) GetKeySeedBitLength() int {
-	return e.GetKeyByteSize() * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e RC4HMAC) GetHashFunc() func() hash.Hash {
-	return md5.New
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e RC4HMAC) GetMessageBlockByteSize() int {
-	return 1
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e RC4HMAC) GetDefaultStringToKeyParams() string {
-	return ""
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
-func (e RC4HMAC) GetConfounderByteSize() int {
-	return 8
-}
-
-// GetHMACBitLength returns the bit count size of the integrity hash.
-func (e RC4HMAC) GetHMACBitLength() int {
-	return md5.Size * 8
-}
-
-// GetCypherBlockBitLength returns the bit count size of the cypher block.
-func (e RC4HMAC) GetCypherBlockBitLength() int {
-	return 8 // doesn't really apply
-}
-
-// StringToKey returns a key derived from the string provided.
-func (e RC4HMAC) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) {
-	return rfc4757.StringToKey(secret)
-}
-
-// RandomToKey returns a key from the bytes provided.
-func (e RC4HMAC) RandomToKey(b []byte) []byte {
-	r := bytes.NewReader(b)
-	h := md4.New()
-	io.Copy(h, r)
-	return h.Sum(nil)
-}
-
-// EncryptData encrypts the data provided.
-func (e RC4HMAC) EncryptData(key, data []byte) ([]byte, []byte, error) {
-	b, err := rfc4757.EncryptData(key, data, e)
-	return []byte{}, b, err
-}
-
-// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message.
-func (e RC4HMAC) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	b, err := rfc4757.EncryptMessage(key, message, usage, false, e)
-	return []byte{}, b, err
-}
-
-// DecryptData decrypts the data provided.
-func (e RC4HMAC) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc4757.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e RC4HMAC) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc4757.DecryptMessage(key, ciphertext, usage, false, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e RC4HMAC) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc4757.HMAC(protocolKey, usage), nil
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e RC4HMAC) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc3961.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the plaintext message.
-func (e RC4HMAC) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	return rfc4757.VerifyIntegrity(protocolKey, pt, ct, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e RC4HMAC) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return rfc4757.Checksum(protocolKey, usage, data)
-}
-
-// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided.
-func (e RC4HMAC) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	checksum, err := rfc4757.Checksum(protocolKey, usage, data)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(checksum, chksum)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go
deleted file mode 100644
index 6f550fa..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/encryption.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Package rfc3961 provides encryption and checksum methods as specified in RFC 3961
-package rfc3961
-
-import (
-	"crypto/cipher"
-	"crypto/des"
-	"crypto/hmac"
-	"crypto/rand"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-)
-
-// DES3EncryptData encrypts the data provided using DES3 and methods specific to the etype provided.
-func DES3EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) {
-	if len(key) != e.GetKeyByteSize() {
-		return nil, nil, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-	data, _ = common.ZeroPad(data, e.GetMessageBlockByteSize())
-
-	block, err := des.NewTripleDESCipher(key)
-	if err != nil {
-		return nil, nil, fmt.Errorf("error creating cipher: %v", err)
-	}
-
-	//RFC 3961: initial cipher state      All bits zero
-	ivz := make([]byte, des.BlockSize)
-
-	ct := make([]byte, len(data))
-	mode := cipher.NewCBCEncrypter(block, ivz)
-	mode.CryptBlocks(ct, data)
-	return ct[len(ct)-e.GetMessageBlockByteSize():], ct, nil
-}
-
-// DES3EncryptMessage encrypts the message provided using DES3 and methods specific to the etype provided.
-// The encrypted data is concatenated with its integrity hash to create an encrypted message.
-func DES3EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
-	//confounder
-	c := make([]byte, e.GetConfounderByteSize())
-	_, err := rand.Read(c)
-	if err != nil {
-		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
-	}
-	plainBytes := append(c, message...)
-	plainBytes, _ = common.ZeroPad(plainBytes, e.GetMessageBlockByteSize())
-
-	// Derive key for encryption from usage
-	var k []byte
-	if usage != 0 {
-		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
-		if err != nil {
-			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
-		}
-	}
-
-	iv, b, err := e.EncryptData(k, plainBytes)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-
-	// Generate and append integrity hash
-	ih, err := common.GetIntegrityHash(plainBytes, key, usage, e)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-	b = append(b, ih...)
-	return iv, b, nil
-}
-
-// DES3DecryptData decrypts the data provided using DES3 and methods specific to the etype provided.
-func DES3DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
-	if len(key) != e.GetKeyByteSize() {
-		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-
-	if len(data) < des.BlockSize || len(data)%des.BlockSize != 0 {
-		return []byte{}, errors.New("ciphertext is not a multiple of the block size")
-	}
-	block, err := des.NewTripleDESCipher(key)
-	if err != nil {
-		return []byte{}, fmt.Errorf("error creating cipher: %v", err)
-	}
-	pt := make([]byte, len(data))
-	ivz := make([]byte, des.BlockSize)
-	mode := cipher.NewCBCDecrypter(block, ivz)
-	mode.CryptBlocks(pt, data)
-	return pt, nil
-}
-
-// DES3DecryptMessage decrypts the message provided using DES3 and methods specific to the etype provided.
-// The integrity of the message is also verified.
-func DES3DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
-	//Derive the key
-	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
-	if err != nil {
-		return nil, fmt.Errorf("error deriving key: %v", err)
-	}
-	// Strip off the checksum from the end
-	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
-	if err != nil {
-		return nil, fmt.Errorf("error decrypting: %v", err)
-	}
-	//Verify checksum
-	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
-		return nil, errors.New("error decrypting: integrity verification failed")
-	}
-	//Remove the confounder bytes
-	return b[e.GetConfounderByteSize():], nil
-}
-
-// VerifyIntegrity verifies the integrity of cipertext bytes ct.
-func VerifyIntegrity(key, ct, pt []byte, usage uint32, etype etype.EType) bool {
-	//The ciphertext output is the concatenation of the output of the basic
-	//encryption function E and a (possibly truncated) HMAC using the
-	//specified hash function H, both applied to the plaintext with a
-	//random confounder prefix and sufficient padding to bring it to a
-	//multiple of the message block size.  When the HMAC is computed, the
-	//key is used in the protocol key form.
-	h := make([]byte, etype.GetHMACBitLength()/8)
-	copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:])
-	expectedMAC, _ := common.GetIntegrityHash(pt, key, usage, etype)
-	return hmac.Equal(h, expectedMAC)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go
deleted file mode 100644
index 8c637a2..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/keyDerivation.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package rfc3961
-
-import (
-	"bytes"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-)
-
-const (
-	prfconstant = "prf"
-)
-
-// DeriveRandom implements the RFC 3961 defined function: DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)).
-//
-// key: base key or protocol key. Likely to be a key from a keytab file.
-//
-// usage: a constant.
-//
-// n: block size in bits (not bytes) - note if you use something like aes.BlockSize this is in bytes.
-//
-// k: key length / key seed length in bits. Eg. for AES256 this value is 256.
-//
-// e: the encryption etype function to use.
-func DeriveRandom(key, usage []byte, e etype.EType) ([]byte, error) {
-	n := e.GetCypherBlockBitLength()
-	k := e.GetKeySeedBitLength()
-	//Ensure the usage constant is at least the size of the cypher block size. Pass it through the nfold algorithm that will "stretch" it if needs be.
-	nFoldUsage := Nfold(usage, n)
-	//k-truncate implemented by creating a byte array the size of k (k is in bits hence /8)
-	out := make([]byte, k/8)
-
-	/*If the output	of E is shorter than k bits, it is fed back into the encryption as many times as necessary.
-	The construct is as follows (where | indicates concatenation):
-
-	K1 = E(Key, n-fold(Constant), initial-cipher-state)
-	K2 = E(Key, K1, initial-cipher-state)
-	K3 = E(Key, K2, initial-cipher-state)
-	K4 = ...
-
-	DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...)*/
-	_, K, err := e.EncryptData(key, nFoldUsage)
-	if err != nil {
-		return out, err
-	}
-	for i := copy(out, K); i < len(out); {
-		_, K, _ = e.EncryptData(key, K)
-		i = i + copy(out[i:], K)
-	}
-	return out, nil
-}
-
-// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods.
-func DeriveKey(protocolKey, usage []byte, e etype.EType) ([]byte, error) {
-	r, err := e.DeriveRandom(protocolKey, usage)
-	if err != nil {
-		return nil, err
-	}
-	return e.RandomToKey(r), nil
-}
-
-// RandomToKey returns a key from the bytes provided according to the definition in RFC 3961.
-func RandomToKey(b []byte) []byte {
-	return b
-}
-
-// DES3RandomToKey returns a key from the bytes provided according to the definition in RFC 3961 for DES3 etypes.
-func DES3RandomToKey(b []byte) []byte {
-	r := fixWeakKey(stretch56Bits(b[:7]))
-	r2 := fixWeakKey(stretch56Bits(b[7:14]))
-	r = append(r, r2...)
-	r3 := fixWeakKey(stretch56Bits(b[14:21]))
-	r = append(r, r3...)
-	return r
-}
-
-// DES3StringToKey returns a key derived from the string provided according to the definition in RFC 3961 for DES3 etypes.
-func DES3StringToKey(secret, salt string, e etype.EType) ([]byte, error) {
-	s := secret + salt
-	tkey := e.RandomToKey(Nfold([]byte(s), e.GetKeySeedBitLength()))
-	return e.DeriveKey(tkey, []byte("kerberos"))
-}
-
-// PseudoRandom function as defined in RFC 3961
-func PseudoRandom(key, b []byte, e etype.EType) ([]byte, error) {
-	h := e.GetHashFunc()()
-	h.Write(b)
-	tmp := h.Sum(nil)[:e.GetMessageBlockByteSize()]
-	k, err := e.DeriveKey(key, []byte(prfconstant))
-	if err != nil {
-		return []byte{}, err
-	}
-	_, prf, err := e.EncryptData(k, tmp)
-	if err != nil {
-		return []byte{}, err
-	}
-	return prf, nil
-}
-
-func stretch56Bits(b []byte) []byte {
-	d := make([]byte, len(b), len(b))
-	copy(d, b)
-	var lb byte
-	for i, v := range d {
-		bv, nb := calcEvenParity(v)
-		d[i] = nb
-		if bv != 0 {
-			lb = lb | (1 << uint(i+1))
-		} else {
-			lb = lb &^ (1 << uint(i+1))
-		}
-	}
-	_, lb = calcEvenParity(lb)
-	d = append(d, lb)
-	return d
-}
-
-func calcEvenParity(b byte) (uint8, uint8) {
-	lowestbit := b & 0x01
-	// c counter of 1s in the first 7 bits of the byte
-	var c int
-	// Iterate over the highest 7 bits (hence p starts at 1 not zero) and count the 1s.
-	for p := 1; p < 8; p++ {
-		v := b & (1 << uint(p))
-		if v != 0 {
-			c++
-		}
-	}
-	if c%2 == 0 {
-		//Even number of 1s so set parity to 1
-		b = b | 1
-	} else {
-		//Odd number of 1s so set parity to 0
-		b = b &^ 1
-	}
-	return lowestbit, b
-}
-
-func fixWeakKey(b []byte) []byte {
-	if weak(b) {
-		b[7] ^= 0xF0
-	}
-	return b
-}
-
-func weak(b []byte) bool {
-	// weak keys from https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-67r1.pdf
-	weakKeys := [4][]byte{
-		{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
-		{0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE},
-		{0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1},
-		{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
-	}
-	semiWeakKeys := [12][]byte{
-		{0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E},
-		{0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E, 0x01},
-		{0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1},
-		{0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01},
-		{0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE},
-		{0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01},
-		{0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1},
-		{0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E},
-		{0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE},
-		{0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E},
-		{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
-		{0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1},
-	}
-	for _, k := range weakKeys {
-		if bytes.Equal(b, k) {
-			return true
-		}
-	}
-	for _, k := range semiWeakKeys {
-		if bytes.Equal(b, k) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go
deleted file mode 100644
index 779d1c6..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3961/nfold.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package rfc3961
-
-/*
-Implementation of the n-fold algorithm as defined in RFC 3961.
-
-n-fold is an algorithm that takes m input bits and "stretches" them
-to form n output bits with equal contribution from each input bit to
-the output, as described in [Blumenthal96]:
-
-We first define a primitive called n-folding, which takes a
-variable-length input block and produces a fixed-length output
-sequence.  The intent is to give each input bit approximately
-equal weight in determining the value of each output bit.  Note
-that whenever we need to treat a string of octets as a number, the
-assumed representation is Big-Endian -- Most Significant Byte
-first.
-
-To n-fold a number X, replicate the input value to a length that
-is the least common multiple of n and the length of X.  Before
-each repetition, the input is rotated to the right by 13 bit
-positions.  The successive n-bit chunks are added together using
-1's-complement addition (that is, with end-around carry) to yield
-a n-bit result....
-*/
-
-/* Credits
-This golang implementation of nfold used the following project for help with implementation detail.
-Although their source is in java it was helpful as a reference implementation of the RFC.
-You can find the source code of their open source project along with license information below.
-We acknowledge and are grateful to these developers for their contributions to open source
-
-Project: Apache Directory (http://http://directory.apache.org/)
-https://svn.apache.org/repos/asf/directory/apacheds/tags/1.5.1/kerberos-shared/src/main/java/org/apache/directory/server/kerberos/shared/crypto/encryption/NFold.java
-License: http://www.apache.org/licenses/LICENSE-2.0
-*/
-
-// Nfold expands the key to ensure it is not smaller than one cipher block.
-// Defined in RFC 3961.
-//
-// m input bytes that will be "stretched" to the least common multiple of n bits and the bit length of m.
-func Nfold(m []byte, n int) []byte {
-	k := len(m) * 8
-
-	//Get the lowest common multiple of the two bit sizes
-	lcm := lcm(n, k)
-	relicate := lcm / k
-	var sumBytes []byte
-
-	for i := 0; i < relicate; i++ {
-		rotation := 13 * i
-		sumBytes = append(sumBytes, rotateRight(m, rotation)...)
-	}
-
-	nfold := make([]byte, n/8)
-	sum := make([]byte, n/8)
-	for i := 0; i < lcm/n; i++ {
-		for j := 0; j < n/8; j++ {
-			sum[j] = sumBytes[j+(i*len(sum))]
-		}
-		nfold = onesComplementAddition(nfold, sum)
-	}
-	return nfold
-}
-
-func onesComplementAddition(n1, n2 []byte) []byte {
-	numBits := len(n1) * 8
-	out := make([]byte, numBits/8)
-	carry := 0
-	for i := numBits - 1; i > -1; i-- {
-		n1b := getBit(&n1, i)
-		n2b := getBit(&n2, i)
-		s := n1b + n2b + carry
-
-		if s == 0 || s == 1 {
-			setBit(&out, i, s)
-			carry = 0
-		} else if s == 2 {
-			carry = 1
-		} else if s == 3 {
-			setBit(&out, i, 1)
-			carry = 1
-		}
-	}
-	if carry == 1 {
-		carryArray := make([]byte, len(n1))
-		carryArray[len(carryArray)-1] = 1
-		out = onesComplementAddition(out, carryArray)
-	}
-	return out
-}
-
-func rotateRight(b []byte, step int) []byte {
-	out := make([]byte, len(b))
-	bitLen := len(b) * 8
-	for i := 0; i < bitLen; i++ {
-		v := getBit(&b, i)
-		setBit(&out, (i+step)%bitLen, v)
-	}
-	return out
-}
-
-func lcm(x, y int) int {
-	return (x * y) / gcd(x, y)
-}
-
-func gcd(x, y int) int {
-	for y != 0 {
-		x, y = y, x%y
-	}
-	return x
-}
-
-func getBit(b *[]byte, p int) int {
-	pByte := p / 8
-	pBit := uint(p % 8)
-	vByte := (*b)[pByte]
-	vInt := int(vByte >> (8 - (pBit + 1)) & 0x0001)
-	return vInt
-}
-
-func setBit(b *[]byte, p, v int) {
-	pByte := p / 8
-	pBit := uint(p % 8)
-	oldByte := (*b)[pByte]
-	var newByte byte
-	newByte = byte(v<<(8-(pBit+1))) | oldByte
-	(*b)[pByte] = newByte
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go
deleted file mode 100644
index 2be2fde..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/encryption.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Package rfc3962 provides encryption and checksum methods as specified in RFC 3962
-package rfc3962
-
-import (
-	"crypto/rand"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/aescts.v1"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-)
-
-// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 3962.
-func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) {
-	if len(key) != e.GetKeyByteSize() {
-		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-	ivz := make([]byte, e.GetCypherBlockBitLength()/8)
-	return aescts.Encrypt(key, ivz, data)
-}
-
-// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 3962.
-// The encrypted data is concatenated with its integrity hash to create an encrypted message.
-func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
-	if len(key) != e.GetKeyByteSize() {
-		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-	//confounder
-	c := make([]byte, e.GetConfounderByteSize())
-	_, err := rand.Read(c)
-	if err != nil {
-		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
-	}
-	plainBytes := append(c, message...)
-
-	// Derive key for encryption from usage
-	var k []byte
-	if usage != 0 {
-		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
-		if err != nil {
-			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
-		}
-	}
-
-	// Encrypt the data
-	iv, b, err := e.EncryptData(k, plainBytes)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-
-	// Generate and append integrity hash
-	ih, err := common.GetIntegrityHash(plainBytes, key, usage, e)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-	b = append(b, ih...)
-	return iv, b, nil
-}
-
-// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 3962.
-func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
-	if len(key) != e.GetKeyByteSize() {
-		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-	ivz := make([]byte, e.GetCypherBlockBitLength()/8)
-	return aescts.Decrypt(key, ivz, data)
-}
-
-// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 3962.
-// The integrity of the message is also verified.
-func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
-	//Derive the key
-	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
-	if err != nil {
-		return nil, fmt.Errorf("error deriving key: %v", err)
-	}
-	// Strip off the checksum from the end
-	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
-	if err != nil {
-		return nil, err
-	}
-	//Verify checksum
-	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
-		return nil, errors.New("integrity verification failed")
-	}
-	//Remove the confounder bytes
-	return b[e.GetConfounderByteSize():], nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go
deleted file mode 100644
index a5f45c1..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc3962/keyDerivation.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package rfc3962
-
-import (
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-
-	"github.com/jcmturner/gofork/x/crypto/pbkdf2"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-)
-
-const (
-	s2kParamsZero = 4294967296
-)
-
-// StringToKey returns a key derived from the string provided according to the definition in RFC 3961.
-func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) {
-	i, err := S2KparamsToItertions(s2kparams)
-	if err != nil {
-		return nil, err
-	}
-	return StringToKeyIter(secret, salt, i, e)
-}
-
-// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0
-func StringToPBKDF2(secret, salt string, iterations int64, e etype.EType) []byte {
-	return pbkdf2.Key64([]byte(secret), []byte(salt), iterations, int64(e.GetKeyByteSize()), e.GetHashFunc())
-}
-
-// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 3961.
-func StringToKeyIter(secret, salt string, iterations int64, e etype.EType) ([]byte, error) {
-	tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e))
-	return e.DeriveKey(tkey, []byte("kerberos"))
-}
-
-// S2KparamsToItertions converts the string representation of iterations to an integer
-func S2KparamsToItertions(s2kparams string) (int64, error) {
-	//process s2kparams string
-	//The parameter string is four octets indicating an unsigned
-	//number in big-endian order.  This is the number of iterations to be
-	//performed.  If the value is 00 00 00 00, the number of iterations to
-	//be performed is 4,294,967,296 (2**32).
-	var i uint32
-	if len(s2kparams) != 8 {
-		return int64(s2kParamsZero), errors.New("invalid s2kparams length")
-	}
-	b, err := hex.DecodeString(s2kparams)
-	if err != nil {
-		return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot decode string to bytes")
-	}
-	i = binary.BigEndian.Uint32(b)
-	//buf := bytes.NewBuffer(b)
-	//err = binary.Read(buf, binary.BigEndian, &i)
-	if err != nil {
-		return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot convert to big endian int32")
-	}
-	return int64(i), nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go
deleted file mode 100644
index 45276e9..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/checksum.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package rfc4757
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"crypto/md5"
-	"io"
-)
-
-// Checksum returns a hash of the data in accordance with RFC 4757
-func Checksum(key []byte, usage uint32, data []byte) ([]byte, error) {
-	// Create hashing key
-	s := append([]byte(`signaturekey`), byte(0x00)) //includes zero octet at end
-	mac := hmac.New(md5.New, key)
-	mac.Write(s)
-	Ksign := mac.Sum(nil)
-
-	// Format data
-	tb := UsageToMSMsgType(usage)
-	p := append(tb, data...)
-	h := md5.New()
-	rb := bytes.NewReader(p)
-	_, err := io.Copy(h, rb)
-	if err != nil {
-		return []byte{}, err
-	}
-	tmp := h.Sum(nil)
-
-	// Generate HMAC
-	mac = hmac.New(md5.New, Ksign)
-	mac.Write(tmp)
-	return mac.Sum(nil), nil
-}
-
-// HMAC returns a keyed MD5 checksum of the data
-func HMAC(key []byte, data []byte) []byte {
-	mac := hmac.New(md5.New, key)
-	mac.Write(data)
-	return mac.Sum(nil)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go
deleted file mode 100644
index 0ec8b99..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/encryption.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package rfc4757 provides encryption and checksum methods as specified in RFC 4757
-package rfc4757
-
-import (
-	"crypto/hmac"
-	"crypto/rand"
-	"crypto/rc4"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-)
-
-// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 4757.
-func EncryptData(key, data []byte, e etype.EType) ([]byte, error) {
-	if len(key) != e.GetKeyByteSize() {
-		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-	rc4Cipher, err := rc4.NewCipher(key)
-	if err != nil {
-		return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err)
-	}
-	ed := make([]byte, len(data))
-	copy(ed, data)
-	rc4Cipher.XORKeyStream(ed, ed)
-	rc4Cipher.Reset()
-	return ed, nil
-}
-
-// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 4757.
-func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
-	return EncryptData(key, data, e)
-}
-
-// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757.
-// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message.
-func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) {
-	confounder := make([]byte, e.GetConfounderByteSize()) // size = 8
-	_, err := rand.Read(confounder)
-	if err != nil {
-		return []byte{}, fmt.Errorf("error generating confounder: %v", err)
-	}
-	k1 := key
-	k2 := HMAC(k1, UsageToMSMsgType(usage))
-	toenc := append(confounder, data...)
-	chksum := HMAC(k2, toenc)
-	k3 := HMAC(k2, chksum)
-
-	ed, err := EncryptData(k3, toenc, e)
-	if err != nil {
-		return []byte{}, fmt.Errorf("error encrypting data: %v", err)
-	}
-
-	msg := append(chksum, ed...)
-	return msg, nil
-}
-
-// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757.
-// The integrity of the message is also verified.
-func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) {
-	checksum := data[:e.GetHMACBitLength()/8]
-	ct := data[e.GetHMACBitLength()/8:]
-	_, k2, k3 := deriveKeys(key, checksum, usage, export)
-
-	pt, err := DecryptData(k3, ct, e)
-	if err != nil {
-		return []byte{}, fmt.Errorf("error decrypting data: %v", err)
-	}
-
-	if !VerifyIntegrity(k2, pt, data, e) {
-		return []byte{}, errors.New("integrity checksum incorrect")
-	}
-	return pt[e.GetConfounderByteSize():], nil
-}
-
-// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data.
-func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool {
-	chksum := HMAC(key, pt)
-	return hmac.Equal(chksum, data[:e.GetHMACBitLength()/8])
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go
deleted file mode 100644
index 5e7ec48..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/keyDerivation.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package rfc4757
-
-import (
-	"bytes"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io"
-
-	"golang.org/x/crypto/md4"
-)
-
-// StringToKey returns a key derived from the string provided according to the definition in RFC 4757.
-func StringToKey(secret string) ([]byte, error) {
-	b := make([]byte, len(secret)*2, len(secret)*2)
-	for i, r := range secret {
-		u := fmt.Sprintf("%04x", r)
-		c, err := hex.DecodeString(u)
-		if err != nil {
-			return []byte{}, errors.New("character could not be encoded")
-		}
-		// Swap round the two bytes to make little endian as we put into byte slice
-		b[2*i] = c[1]
-		b[2*i+1] = c[0]
-	}
-	r := bytes.NewReader(b)
-	h := md4.New()
-	_, err := io.Copy(h, r)
-	if err != nil {
-		return []byte{}, err
-	}
-	return h.Sum(nil), nil
-}
-
-func deriveKeys(key, checksum []byte, usage uint32, export bool) (k1, k2, k3 []byte) {
-	//if export {
-	//	L40 := make([]byte, 14, 14)
-	//	copy(L40, []byte(`fortybits`))
-	//	k1 = HMAC(key, L40)
-	//} else {
-	//	tb := MessageTypeBytes(usage)
-	//	k1 = HMAC(key, tb)
-	//}
-	//k2 = k1[:16]
-	//if export {
-	//	mask := []byte{0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB}
-	//	copy(k1[7:16], mask)
-	//}
-	//k3 = HMAC(k1, checksum)
-	//return
-	k1 = key
-	k2 = HMAC(k1, UsageToMSMsgType(usage))
-	k3 = HMAC(k2, checksum)
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go
deleted file mode 100644
index 068588d..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc4757/msgtype.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package rfc4757
-
-import "encoding/binary"
-
-// UsageToMSMsgType converts Kerberos key usage numbers to Microsoft message type encoded as a little-endian four byte slice.
-func UsageToMSMsgType(usage uint32) []byte {
-	// Translate usage numbers to the Microsoft T numbers
-	switch usage {
-	case 3:
-		usage = 8
-	case 9:
-		usage = 8
-	case 23:
-		usage = 13
-	}
-	// Now convert to bytes
-	tb := make([]byte, 4) // We force an int32 input so we can't go over 4 bytes
-	binary.PutUvarint(tb, uint64(usage))
-	return tb
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go
deleted file mode 100644
index 86aae09..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/encryption.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Package rfc8009 provides encryption and checksum methods as specified in RFC 8009
-package rfc8009
-
-import (
-	"crypto/aes"
-	"crypto/hmac"
-	"crypto/rand"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/aescts.v1"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/common"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 8009.
-func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) {
-	kl := e.GetKeyByteSize()
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		kl = 32
-	}
-	if len(key) != kl {
-		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key))
-	}
-	ivz := make([]byte, aes.BlockSize)
-	return aescts.Encrypt(key, ivz, data)
-}
-
-// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 8009.
-// The encrypted data is concatenated with its integrity hash to create an encrypted message.
-func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
-	kl := e.GetKeyByteSize()
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		kl = 32
-	}
-	if len(key) != kl {
-		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
-	}
-	if len(key) != e.GetKeyByteSize() {
-	}
-	//confounder
-	c := make([]byte, e.GetConfounderByteSize())
-	_, err := rand.Read(c)
-	if err != nil {
-		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
-	}
-	plainBytes := append(c, message...)
-
-	// Derive key for encryption from usage
-	var k []byte
-	if usage != 0 {
-		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
-		if err != nil {
-			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
-		}
-	}
-
-	// Encrypt the data
-	iv, b, err := e.EncryptData(k, plainBytes)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-
-	ivz := make([]byte, e.GetConfounderByteSize())
-	ih, err := GetIntegityHash(ivz, b, key, usage, e)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-	b = append(b, ih...)
-	return iv, b, nil
-}
-
-// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 8009.
-func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
-	kl := e.GetKeyByteSize()
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		kl = 32
-	}
-	if len(key) != kl {
-		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
-	}
-	ivz := make([]byte, aes.BlockSize)
-	return aescts.Decrypt(key, ivz, data)
-}
-
-// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 8009.
-// The integrity of the message is also verified.
-func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
-	//Derive the key
-	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
-	if err != nil {
-		return nil, fmt.Errorf("error deriving key: %v", err)
-	}
-	// Strip off the checksum from the end
-	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
-	if err != nil {
-		return nil, err
-	}
-	//Verify checksum
-	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
-		return nil, errors.New("integrity verification failed")
-	}
-	//Remove the confounder bytes
-	return b[e.GetConfounderByteSize():], nil
-}
-
-// GetIntegityHash returns a keyed integrity hash of the bytes provided as defined in RFC 8009
-func GetIntegityHash(iv, c, key []byte, usage uint32, e etype.EType) ([]byte, error) {
-	// Generate and append integrity hash
-	// The HMAC is calculated over the cipher state concatenated with the
-	// AES output, instead of being calculated over the confounder and
-	// plaintext.  This allows the message receiver to verify the
-	// integrity of the message before decrypting the message.
-	// H = HMAC(Ki, IV | C)
-	ib := append(iv, c...)
-	return common.GetIntegrityHash(ib, key, usage, e)
-}
-
-// VerifyIntegrity verifies the integrity of cipertext bytes ct.
-func VerifyIntegrity(key, ct []byte, usage uint32, etype etype.EType) bool {
-	h := make([]byte, etype.GetHMACBitLength()/8)
-	copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:])
-	ivz := make([]byte, etype.GetConfounderByteSize())
-	ib := append(ivz, ct[:len(ct)-(etype.GetHMACBitLength()/8)]...)
-	expectedMAC, _ := common.GetIntegrityHash(ib, key, usage, etype)
-	return hmac.Equal(h, expectedMAC)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go
deleted file mode 100644
index 90ced3b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/crypto/rfc8009/keyDerivation.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package rfc8009
-
-import (
-	"crypto/hmac"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-
-	"golang.org/x/crypto/pbkdf2"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto/etype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/etypeID"
-)
-
-const (
-	s2kParamsZero = 32768
-)
-
-// DeriveRandom for key derivation as defined in RFC 8009
-func DeriveRandom(protocolKey, usage []byte, e etype.EType) ([]byte, error) {
-	h := e.GetHashFunc()()
-	return KDF_HMAC_SHA2(protocolKey, []byte("prf"), usage, h.Size(), e), nil
-}
-
-// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods.
-//
-// https://tools.ietf.org/html/rfc8009#section-5
-//
-// If the enctype is aes128-cts-hmac-sha256-128:
-// Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128)
-// Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128)
-// Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128)
-//
-// If the enctype is aes256-cts-hmac-sha384-192:
-// Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192)
-// Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256)
-// Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192)
-func DeriveKey(protocolKey, label []byte, e etype.EType) []byte {
-	var context []byte
-	var kl int
-	// Key length is longer for aes256-cts-hmac-sha384-192 is it is a Ke or from StringToKey (where label is "kerberos")
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		switch label[len(label)-1] {
-		case 0x73:
-			// 0x73 is "s" so label could be kerberos meaning StringToKey so now check if the label is "kerberos"
-			kerblabel := []byte("kerberos")
-			if len(label) != len(kerblabel) {
-				break
-			}
-			for i, b := range label {
-				if b != kerblabel[i] {
-					kl = e.GetKeySeedBitLength()
-					break
-				}
-			}
-			if kl == 0 {
-				// This is StringToKey
-				kl = 256
-			}
-		case 0xAA:
-			// This is a Ke
-			kl = 256
-		}
-	}
-	if kl == 0 {
-		kl = e.GetKeySeedBitLength()
-	}
-	return e.RandomToKey(KDF_HMAC_SHA2(protocolKey, label, context, kl, e))
-}
-
-// RandomToKey returns a key from the bytes provided according to the definition in RFC 8009.
-func RandomToKey(b []byte) []byte {
-	return b
-}
-
-// StringToKey returns a key derived from the string provided according to the definition in RFC 8009.
-func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) {
-	i, err := S2KparamsToItertions(s2kparams)
-	if err != nil {
-		return nil, err
-	}
-	return StringToKeyIter(secret, salt, i, e)
-}
-
-// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 8009.
-func StringToKeyIter(secret, salt string, iterations int, e etype.EType) ([]byte, error) {
-	tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e))
-	return e.DeriveKey(tkey, []byte("kerberos"))
-}
-
-// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0
-func StringToPBKDF2(secret, salt string, iterations int, e etype.EType) []byte {
-	kl := e.GetKeyByteSize()
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		kl = 32
-	}
-	return pbkdf2.Key([]byte(secret), []byte(salt), iterations, kl, e.GetHashFunc())
-}
-
-// KDF_HMAC_SHA2 key derivation: https://tools.ietf.org/html/rfc8009#section-3
-func KDF_HMAC_SHA2(protocolKey, label, context []byte, kl int, e etype.EType) []byte {
-	//k: Length in bits of the key to be outputted, expressed in big-endian binary representation in 4 bytes.
-	k := make([]byte, 4, 4)
-	binary.BigEndian.PutUint32(k, uint32(kl))
-
-	c := make([]byte, 4, 4)
-	binary.BigEndian.PutUint32(c, uint32(1))
-	c = append(c, label...)
-	c = append(c, byte(0))
-	if len(context) > 0 {
-		c = append(c, context...)
-	}
-	c = append(c, k...)
-
-	mac := hmac.New(e.GetHashFunc(), protocolKey)
-	mac.Write(c)
-	return mac.Sum(nil)[:(kl / 8)]
-}
-
-// GetSaltP returns the salt value based on the etype name: https://tools.ietf.org/html/rfc8009#section-4
-func GetSaltP(salt, ename string) string {
-	b := []byte(ename)
-	b = append(b, byte(0))
-	b = append(b, []byte(salt)...)
-	return string(b)
-}
-
-// S2KparamsToItertions converts the string representation of iterations to an integer for RFC 8009.
-func S2KparamsToItertions(s2kparams string) (int, error) {
-	var i uint32
-	if len(s2kparams) != 8 {
-		return s2kParamsZero, errors.New("Invalid s2kparams length")
-	}
-	b, err := hex.DecodeString(s2kparams)
-	if err != nil {
-		return s2kParamsZero, errors.New("Invalid s2kparams, cannot decode string to bytes")
-	}
-	i = binary.BigEndian.Uint32(b)
-	//buf := bytes.NewBuffer(b)
-	//err = binary.Read(buf, binary.BigEndian, &i)
-	if err != nil {
-		return s2kParamsZero, errors.New("Invalid s2kparams, cannot convert to big endian int32")
-	}
-	return int(i), nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go
deleted file mode 100644
index 856412b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/MICToken.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package gssapi
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-/*
-From RFC 4121, section 4.2.6.1:
-
-   Use of the GSS_GetMIC() call yields a token (referred as the MIC
-   token in this document), separate from the user data being protected,
-   which can be used to verify the integrity of that data as received.
-   The token has the following format:
-
-         Octet no   Name        Description
-         --------------------------------------------------------------
-         0..1     TOK_ID     Identification field.  Tokens emitted by
-                             GSS_GetMIC() contain the hex value 04 04
-                             expressed in big-endian order in this
-                             field.
-         2        Flags      Attributes field, as described in section
-                             4.2.2.
-         3..7     Filler     Contains five octets of hex value FF.
-         8..15    SND_SEQ    Sequence number field in clear text,
-                             expressed in big-endian order.
-         16..last SGN_CKSUM  Checksum of the "to-be-signed" data and
-                             octet 0..15, as described in section 4.2.4.
-
-   The Filler field is included in the checksum calculation for
-   simplicity.
-
-*/
-
-const (
-	// MICTokenFlagSentByAcceptor - this flag indicates the sender is the context acceptor.  When not set, it indicates the sender is the context initiator
-	MICTokenFlagSentByAcceptor = 1 << iota
-	// MICTokenFlagSealed - this flag indicates confidentiality is provided for.  It SHALL NOT be set in MIC tokens
-	MICTokenFlagSealed
-	// MICTokenFlagAcceptorSubkey - a subkey asserted by the context acceptor is used to protect the message
-	MICTokenFlagAcceptorSubkey
-)
-
-const (
-	micHdrLen = 16 // Length of the MIC Token's header
-)
-
-// MICToken represents a GSS API MIC token, as defined in RFC 4121.
-// It contains the header fields, the payload (this is not transmitted) and
-// the checksum, and provides the logic for converting to/from bytes plus
-// computing and verifying checksums
-type MICToken struct {
-	// const GSS Token ID: 0x0404
-	Flags byte // contains three flags: acceptor, sealed, acceptor subkey
-	// const Filler: 0xFF 0xFF 0xFF 0xFF 0xFF
-	SndSeqNum uint64 // sender's sequence number. big-endian
-	Payload   []byte // your data! :)
-	Checksum  []byte // checksum of { payload | header }
-}
-
-// Return the 2 bytes identifying a GSS API MIC token
-func getGSSMICTokenID() *[2]byte {
-	return &[2]byte{0x04, 0x04}
-}
-
-// Return the filler bytes used in header
-func fillerBytes() *[5]byte {
-	return &[5]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
-}
-
-// Marshal the MICToken into a byte slice.
-// The payload should have been set and the checksum computed, otherwise an error is returned.
-func (mt *MICToken) Marshal() ([]byte, error) {
-	if mt.Checksum == nil {
-		return nil, errors.New("checksum has not been set")
-	}
-
-	bytes := make([]byte, micHdrLen+len(mt.Checksum))
-	copy(bytes[0:micHdrLen], mt.getMICChecksumHeader()[:])
-	copy(bytes[micHdrLen:], mt.Checksum)
-
-	return bytes, nil
-}
-
-// SetChecksum uses the passed encryption key and key usage to compute the checksum over the payload and
-// the header, and sets the Checksum field of this MICToken.
-// If the payload has not been set or the checksum has already been set, an error is returned.
-func (mt *MICToken) SetChecksum(key types.EncryptionKey, keyUsage uint32) error {
-	if mt.Checksum != nil {
-		return errors.New("checksum has already been computed")
-	}
-	checksum, err := mt.checksum(key, keyUsage)
-	if err != nil {
-		return err
-	}
-	mt.Checksum = checksum
-	return nil
-}
-
-// Compute and return the checksum of this token, computed using the passed key and key usage.
-// Confirms to RFC 4121 in that the checksum will be computed over { body | header }.
-// In the context of Kerberos MIC tokens, mostly keyusage GSSAPI_ACCEPTOR_SIGN (=23)
-// and GSSAPI_INITIATOR_SIGN (=25) will be used.
-// Note: This will NOT update the struct's Checksum field.
-func (mt *MICToken) checksum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) {
-	if mt.Payload == nil {
-		return nil, errors.New("cannot compute checksum with uninitialized payload")
-	}
-	d := make([]byte, micHdrLen+len(mt.Payload))
-	copy(d[0:], mt.Payload)
-	copy(d[len(mt.Payload):], mt.getMICChecksumHeader())
-
-	encType, err := crypto.GetEtype(key.KeyType)
-	if err != nil {
-		return nil, err
-	}
-	return encType.GetChecksumHash(key.KeyValue, d, keyUsage)
-}
-
-// Build a header suitable for a checksum computation
-func (mt *MICToken) getMICChecksumHeader() []byte {
-	header := make([]byte, micHdrLen)
-	copy(header[0:2], getGSSMICTokenID()[:])
-	header[2] = mt.Flags
-	copy(header[3:8], fillerBytes()[:])
-	binary.BigEndian.PutUint64(header[8:16], mt.SndSeqNum)
-	return header
-}
-
-// Verify computes the token's checksum with the provided key and usage,
-// and compares it to the checksum present in the token.
-// In case of any failure, (false, err) is returned, with err an explanatory error.
-func (mt *MICToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) {
-	computed, err := mt.checksum(key, keyUsage)
-	if err != nil {
-		return false, err
-	}
-	if !hmac.Equal(computed, mt.Checksum) {
-		return false, fmt.Errorf(
-			"checksum mismatch. Computed: %s, Contained in token: %s",
-			hex.EncodeToString(computed), hex.EncodeToString(mt.Checksum))
-	}
-	return true, nil
-}
-
-// Unmarshal bytes into the corresponding MICToken.
-// If expectFromAcceptor is true we expect the token to have been emitted by the gss acceptor,
-// and will check the according flag, returning an error if the token does not match the expectation.
-func (mt *MICToken) Unmarshal(b []byte, expectFromAcceptor bool) error {
-	if len(b) < micHdrLen {
-		return errors.New("bytes shorter than header length")
-	}
-	if !bytes.Equal(getGSSMICTokenID()[:], b[0:2]) {
-		return fmt.Errorf("wrong Token ID, Expected %s, was %s",
-			hex.EncodeToString(getGSSMICTokenID()[:]),
-			hex.EncodeToString(b[0:2]))
-	}
-	flags := b[2]
-	isFromAcceptor := flags&MICTokenFlagSentByAcceptor != 0
-	if isFromAcceptor && !expectFromAcceptor {
-		return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor")
-	}
-	if !isFromAcceptor && expectFromAcceptor {
-		return errors.New("unexpected acceptor flag is not set: expecting a token from the acceptor, not in the initiator")
-	}
-	if !bytes.Equal(b[3:8], fillerBytes()[:]) {
-		return fmt.Errorf("unexpected filler bytes: expecting %s, was %s",
-			hex.EncodeToString(fillerBytes()[:]),
-			hex.EncodeToString(b[3:8]))
-	}
-
-	mt.Flags = flags
-	mt.SndSeqNum = binary.BigEndian.Uint64(b[8:16])
-	mt.Checksum = b[micHdrLen:]
-	return nil
-}
-
-// NewInitiatorMICToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum.
-// Other flags are set to 0.
-// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier.
-// This is currently not supported.
-func NewInitiatorMICToken(payload []byte, key types.EncryptionKey) (*MICToken, error) {
-	token := MICToken{
-		Flags:     0x00,
-		SndSeqNum: 0,
-		Payload:   payload,
-	}
-
-	if err := token.SetChecksum(key, keyusage.GSSAPI_INITIATOR_SIGN); err != nil {
-		return nil, err
-	}
-
-	return &token, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md
deleted file mode 100644
index 8fdcf70..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Notes on GSS-API Negotiation Mechanism
-https://tools.ietf.org/html/rfc4178
-
-Client sends an initial negotiation message to the server which specifies the list of mechanisms 
-the client can support in order of decreasing preference.
-This message is generated with the ``NewNegTokenInitKrb5`` method.
-The message generated by this function specifies only a kerberos v5 mechanism is supported.
-
-The RFC states that this message can optionally contain the initial mechanism token 
-for the preferred mechanism (KRB5 in this case) of the client. The ``NewNegTokenInitKrb5`` 
-includes this in the message.
-
-The server side responds to this message with a one of four messages:
-
-| Message Type/State | Description |
-|--------------------|-------------|
-| accept-completed | indicates that the initiator-selected mechanism was acceptable to the target, and that the security mechanism token embedded in the first negotiation message was sufficient to complete the authentication |
-| accept-incomplete | At least one more message is needed from the client to establish security context. |
-| reject | Negotiation is being terminated. |
-| request-mic | (this state can only be present in the first reply message from the target) indicates that the MIC token exchange is REQUIRED if per-message integrity services are available |
\ No newline at end of file
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go
deleted file mode 100644
index 6634c6d..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/contextFlags.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package gssapi
-
-import "github.com/jcmturner/gofork/encoding/asn1"
-
-// GSS-API context flags assigned numbers.
-const (
-	ContextFlagDeleg    = 1
-	ContextFlagMutual   = 2
-	ContextFlagReplay   = 4
-	ContextFlagSequence = 8
-	ContextFlagConf     = 16
-	ContextFlagInteg    = 32
-	ContextFlagAnon     = 64
-)
-
-// ContextFlags flags for GSSAPI
-type ContextFlags asn1.BitString
-
-// NewContextFlags creates a new ContextFlags instance.
-func NewContextFlags() ContextFlags {
-	var c ContextFlags
-	c.BitLength = 32
-	c.Bytes = make([]byte, 4)
-	return c
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go
deleted file mode 100644
index 47754d7..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/gssapi.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Package gssapi implements Generic Security Services Application Program Interface required for SPNEGO kerberos authentication.
-package gssapi
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-)
-
-// GSS-API OID names
-const (
-	// GSS-API OID names
-	OIDKRB5         OIDName = "KRB5"         // MechType OID for Kerberos 5
-	OIDMSLegacyKRB5 OIDName = "MSLegacyKRB5" // MechType OID for Kerberos 5
-	OIDSPNEGO       OIDName = "SPNEGO"
-)
-
-// GSS-API status values
-const (
-	StatusBadBindings = 1 << iota
-	StatusBadMech
-	StatusBadName
-	StatusBadNameType
-	StatusBadStatus
-	StatusBadSig
-	StatusBadMIC
-	StatusContextExpired
-	StatusCredentialsExpired
-	StatusDefectiveCredential
-	StatusDefectiveToken
-	StatusFailure
-	StatusNoContext
-	StatusNoCred
-	StatusBadQOP
-	StatusUnauthorized
-	StatusUnavailable
-	StatusDuplicateElement
-	StatusNameNotMN
-	StatusComplete
-	StatusContinueNeeded
-	StatusDuplicateToken
-	StatusOldToken
-	StatusUnseqToken
-	StatusGapToken
-)
-
-// ContextToken is an interface for a GSS-API context token.
-type ContextToken interface {
-	Marshal() ([]byte, error)
-	Unmarshal(b []byte) error
-	Verify() (bool, Status)
-	Context() context.Context
-}
-
-/*
-CREDENTIAL MANAGEMENT
-
-GSS_Acquire_cred             acquire credentials for use
-GSS_Release_cred             release credentials after use
-GSS_Inquire_cred             display information about credentials
-GSS_Add_cred                 construct credentials incrementally
-GSS_Inquire_cred_by_mech     display per-mechanism credential information
-
-CONTEXT-LEVEL CALLS
-
-GSS_Init_sec_context         initiate outbound security context
-GSS_Accept_sec_context       accept inbound security context
-GSS_Delete_sec_context       flush context when no longer needed
-GSS_Process_context_token    process received control token on context
-GSS_Context_time             indicate validity time remaining on context
-GSS_Inquire_context          display information about context
-GSS_Wrap_size_limit          determine GSS_Wrap token size limit
-GSS_Export_sec_context       transfer context to other process
-GSS_Import_sec_context       import transferred context
-
-PER-MESSAGE CALLS
-
-GSS_GetMIC                   apply integrity check, receive as token separate from message
-GSS_VerifyMIC                validate integrity check token along with message
-GSS_Wrap                     sign, optionally encrypt, encapsulate
-GSS_Unwrap                   decapsulate, decrypt if needed, validate integrity check
-
-SUPPORT CALLS
-
-GSS_Display_status           translate status codes to printable form
-GSS_Indicate_mechs           indicate mech_types supported on local system
-GSS_Compare_name             compare two names for equality
-GSS_Display_name             translate name to printable form
-GSS_Import_name              convert printable name to normalized form
-GSS_Release_name             free storage of normalized-form name
-GSS_Release_buffer           free storage of general GSS-allocated object
-GSS_Release_OID_set          free storage of OID set object
-GSS_Create_empty_OID_set     create empty OID set
-GSS_Add_OID_set_member       add member to OID set
-GSS_Test_OID_set_member      test if OID is member of OID set
-GSS_Inquire_names_for_mech   indicate name types supported by mechanism
-GSS_Inquire_mechs_for_name   indicates mechanisms supporting name type
-GSS_Canonicalize_name        translate name to per-mechanism form
-GSS_Export_name              externalize per-mechanism name
-GSS_Duplicate_name           duplicate name object
-*/
-
-// Mechanism is the GSS-API interface for authentication mechanisms.
-type Mechanism interface {
-	OID() asn1.ObjectIdentifier
-	AcquireCred() error                                               // acquire credentials for use (eg. AS exchange for KRB5)
-	InitSecContext() (ContextToken, error)                            // initiate outbound security context (eg TGS exchange builds AP_REQ to go into ContextToken to send to service)
-	AcceptSecContext(ct ContextToken) (bool, context.Context, Status) // service verifies the token server side to establish a context
-	MIC() MICToken                                                    // apply integrity check, receive as token separate from message
-	VerifyMIC(mt MICToken) (bool, error)                              // validate integrity check token along with message
-	Wrap(msg []byte) WrapToken                                        // sign, optionally encrypt, encapsulate
-	Unwrap(wt WrapToken) []byte                                       // decapsulate, decrypt if needed, validate integrity check
-}
-
-// OIDName is the type for defined GSS-API OIDs.
-type OIDName string
-
-// OID returns the OID for the provided OID name.
-func OID(o OIDName) asn1.ObjectIdentifier {
-	switch o {
-	case OIDSPNEGO:
-		return asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2}
-	case OIDKRB5:
-		return asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2}
-	case OIDMSLegacyKRB5:
-		return asn1.ObjectIdentifier{1, 2, 840, 48018, 1, 2, 2}
-	}
-	return asn1.ObjectIdentifier{}
-}
-
-// Status is the GSS-API status and implements the error interface.
-type Status struct {
-	Code    int
-	Message string
-}
-
-// Error returns the Status description.
-func (s Status) Error() string {
-	var str string
-	switch s.Code {
-	case StatusBadBindings:
-		str = "channel binding mismatch"
-	case StatusBadMech:
-		str = "unsupported mechanism requested"
-	case StatusBadName:
-		str = "invalid name provided"
-	case StatusBadNameType:
-		str = "name of unsupported type provided"
-	case StatusBadStatus:
-		str = "invalid input status selector"
-	case StatusBadSig:
-		str = "token had invalid integrity check"
-	case StatusBadMIC:
-		str = "preferred alias for GSS_S_BAD_SIG"
-	case StatusContextExpired:
-		str = "specified security context expired"
-	case StatusCredentialsExpired:
-		str = "expired credentials detected"
-	case StatusDefectiveCredential:
-		str = "defective credential detected"
-	case StatusDefectiveToken:
-		str = "defective token detected"
-	case StatusFailure:
-		str = "failure, unspecified at GSS-API level"
-	case StatusNoContext:
-		str = "no valid security context specified"
-	case StatusNoCred:
-		str = "no valid credentials provided"
-	case StatusBadQOP:
-		str = "unsupported QOP valu"
-	case StatusUnauthorized:
-		str = "operation unauthorized"
-	case StatusUnavailable:
-		str = "operation unavailable"
-	case StatusDuplicateElement:
-		str = "duplicate credential element requested"
-	case StatusNameNotMN:
-		str = "name contains multi-mechanism elements"
-	case StatusComplete:
-		str = "normal completion"
-	case StatusContinueNeeded:
-		str = "continuation call to routine required"
-	case StatusDuplicateToken:
-		str = "duplicate per-message token detected"
-	case StatusOldToken:
-		str = "timed-out per-message token detected"
-	case StatusUnseqToken:
-		str = "reordered (early) per-message token detected"
-	case StatusGapToken:
-		str = "skipped predecessor token(s) detected"
-	default:
-		str = "unknown GSS-API error status"
-	}
-	if s.Message != "" {
-		return fmt.Sprintf("%s: %s", str, s.Message)
-	}
-	return str
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go
deleted file mode 100644
index 9dbf96b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/gssapi/wrapToken.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package gssapi
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-/*
-From RFC 4121, section 4.2.6.2:
-
-   Use of the GSS_Wrap() call yields a token (referred as the Wrap token
-   in this document), which consists of a descriptive header, followed
-   by a body portion that contains either the input user data in
-   plaintext concatenated with the checksum, or the input user data
-   encrypted.  The GSS_Wrap() token SHALL have the following format:
-
-         Octet no   Name        Description
-         --------------------------------------------------------------
-          0..1     TOK_ID    Identification field.  Tokens emitted by
-                             GSS_Wrap() contain the hex value 05 04
-                             expressed in big-endian order in this
-                             field.
-          2        Flags     Attributes field, as described in section
-                             4.2.2.
-          3        Filler    Contains the hex value FF.
-          4..5     EC        Contains the "extra count" field, in big-
-                             endian order as described in section 4.2.3.
-          6..7     RRC       Contains the "right rotation count" in big-
-                             endian order, as described in section
-                             4.2.5.
-          8..15    SndSeqNum   Sequence number field in clear text,
-                             expressed in big-endian order.
-          16..last Data      Encrypted data for Wrap tokens with
-                             confidentiality, or plaintext data followed
-                             by the checksum for Wrap tokens without
-                             confidentiality, as described in section
-                             4.2.4.
-
-Quick notes:
-	- "EC" or "Extra Count" refers to the length of the checksum.
-	- "Flags" (complete details in section 4.2.2) is a set of bits:
-		- if bit 0 is set, it means the token was sent by the acceptor (generally the kerberized service).
-		- bit 1 indicates that the token's payload is encrypted
- 		- bit 2 indicates if the message is protected using a subkey defined by the acceptor.
-	- When computing checksums, EC and RRC MUST be set to 0.
-    - Wrap Tokens are not ASN.1 encoded.
-*/
-const (
-	HdrLen          = 16 // Length of the Wrap Token's header
-	FillerByte byte = 0xFF
-)
-
-// WrapToken represents a GSS API Wrap token, as defined in RFC 4121.
-// It contains the header fields, the payload and the checksum, and provides
-// the logic for converting to/from bytes plus computing and verifying checksums
-type WrapToken struct {
-	// const GSS Token ID: 0x0504
-	Flags byte // contains three flags: acceptor, sealed, acceptor subkey
-	// const Filler: 0xFF
-	EC        uint16 // checksum length. big-endian
-	RRC       uint16 // right rotation count. big-endian
-	SndSeqNum uint64 // sender's sequence number. big-endian
-	Payload   []byte // your data! :)
-	CheckSum  []byte // authenticated checksum of { payload | header }
-}
-
-// Return the 2 bytes identifying a GSS API Wrap token
-func getGssWrapTokenId() *[2]byte {
-	return &[2]byte{0x05, 0x04}
-}
-
-// Marshal the WrapToken into a byte slice.
-// The payload should have been set and the checksum computed, otherwise an error is returned.
-func (wt *WrapToken) Marshal() ([]byte, error) {
-	if wt.CheckSum == nil {
-		return nil, errors.New("checksum has not been set")
-	}
-	if wt.Payload == nil {
-		return nil, errors.New("payload has not been set")
-	}
-
-	pldOffset := HdrLen                    // Offset of the payload in the token
-	chkSOffset := HdrLen + len(wt.Payload) // Offset of the checksum in the token
-
-	bytes := make([]byte, chkSOffset+int(wt.EC))
-	copy(bytes[0:], getGssWrapTokenId()[:])
-	bytes[2] = wt.Flags
-	bytes[3] = FillerByte
-	binary.BigEndian.PutUint16(bytes[4:6], wt.EC)
-	binary.BigEndian.PutUint16(bytes[6:8], wt.RRC)
-	binary.BigEndian.PutUint64(bytes[8:16], wt.SndSeqNum)
-	copy(bytes[pldOffset:], wt.Payload)
-	copy(bytes[chkSOffset:], wt.CheckSum)
-	return bytes, nil
-}
-
-// SetCheckSum uses the passed encryption key and key usage to compute the checksum over the payload and
-// the header, and sets the CheckSum field of this WrapToken.
-// If the payload has not been set or the checksum has already been set, an error is returned.
-func (wt *WrapToken) SetCheckSum(key types.EncryptionKey, keyUsage uint32) error {
-	if wt.Payload == nil {
-		return errors.New("payload has not been set")
-	}
-	if wt.CheckSum != nil {
-		return errors.New("checksum has already been computed")
-	}
-	chkSum, cErr := wt.computeCheckSum(key, keyUsage)
-	if cErr != nil {
-		return cErr
-	}
-	wt.CheckSum = chkSum
-	return nil
-}
-
-// ComputeCheckSum computes and returns the checksum of this token, computed using the passed key and key usage.
-// Conforms to RFC 4121 in that the checksum will be computed over { body | header },
-// with the EC and RRC flags zeroed out.
-// In the context of Kerberos Wrap tokens, mostly keyusage GSSAPI_ACCEPTOR_SEAL (=22)
-// and GSSAPI_INITIATOR_SEAL (=24) will be used.
-// Note: This will NOT update the struct's Checksum field.
-func (wt *WrapToken) computeCheckSum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) {
-	if wt.Payload == nil {
-		return nil, errors.New("cannot compute checksum with uninitialized payload")
-	}
-	// Build a slice containing { payload | header }
-	checksumMe := make([]byte, HdrLen+len(wt.Payload))
-	copy(checksumMe[0:], wt.Payload)
-	copy(checksumMe[len(wt.Payload):], getChecksumHeader(wt.Flags, wt.SndSeqNum))
-
-	encType, err := crypto.GetEtype(key.KeyType)
-	if err != nil {
-		return nil, err
-	}
-	return encType.GetChecksumHash(key.KeyValue, checksumMe, keyUsage)
-}
-
-// Build a header suitable for a checksum computation
-func getChecksumHeader(flags byte, senderSeqNum uint64) []byte {
-	header := make([]byte, 16)
-	copy(header[0:], []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00})
-	binary.BigEndian.PutUint64(header[8:], senderSeqNum)
-	return header
-}
-
-// Verify computes the token's checksum with the provided key and usage,
-// and compares it to the checksum present in the token.
-// In case of any failure, (false, Err) is returned, with Err an explanatory error.
-func (wt *WrapToken) Verify(key types.EncryptionKey, keyUsage uint32) (bool, error) {
-	computed, cErr := wt.computeCheckSum(key, keyUsage)
-	if cErr != nil {
-		return false, cErr
-	}
-	if !hmac.Equal(computed, wt.CheckSum) {
-		return false, fmt.Errorf(
-			"checksum mismatch. Computed: %s, Contained in token: %s",
-			hex.EncodeToString(computed), hex.EncodeToString(wt.CheckSum))
-	}
-	return true, nil
-}
-
-// Unmarshal bytes into the corresponding WrapToken.
-// If expectFromAcceptor is true, we expect the token to have been emitted by the gss acceptor,
-// and will check the according flag, returning an error if the token does not match the expectation.
-func (wt *WrapToken) Unmarshal(b []byte, expectFromAcceptor bool) error {
-	// Check if we can read a whole header
-	if len(b) < 16 {
-		return errors.New("bytes shorter than header length")
-	}
-	// Is the Token ID correct?
-	if !bytes.Equal(getGssWrapTokenId()[:], b[0:2]) {
-		return fmt.Errorf("wrong Token ID. Expected %s, was %s",
-			hex.EncodeToString(getGssWrapTokenId()[:]),
-			hex.EncodeToString(b[0:2]))
-	}
-	// Check the acceptor flag
-	flags := b[2]
-	isFromAcceptor := flags&0x01 == 1
-	if isFromAcceptor && !expectFromAcceptor {
-		return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor")
-	}
-	if !isFromAcceptor && expectFromAcceptor {
-		return errors.New("expected acceptor flag is not set: expecting a token from the acceptor, not the initiator")
-	}
-	// Check the filler byte
-	if b[3] != FillerByte {
-		return fmt.Errorf("unexpected filler byte: expecting 0xFF, was %s ", hex.EncodeToString(b[3:4]))
-	}
-	checksumL := binary.BigEndian.Uint16(b[4:6])
-	// Sanity check on the checksum length
-	if int(checksumL) > len(b)-HdrLen {
-		return fmt.Errorf("inconsistent checksum length: %d bytes to parse, checksum length is %d", len(b), checksumL)
-	}
-
-	wt.Flags = flags
-	wt.EC = checksumL
-	wt.RRC = binary.BigEndian.Uint16(b[6:8])
-	wt.SndSeqNum = binary.BigEndian.Uint64(b[8:16])
-	wt.Payload = b[16 : len(b)-int(checksumL)]
-	wt.CheckSum = b[len(b)-int(checksumL):]
-	return nil
-}
-
-// NewInitiatorWrapToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum.
-// Other flags are set to 0, and the RRC and sequence number are initialized to 0.
-// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier.
-// This is currently not supported.
-func NewInitiatorWrapToken(payload []byte, key types.EncryptionKey) (*WrapToken, error) {
-	encType, err := crypto.GetEtype(key.KeyType)
-	if err != nil {
-		return nil, err
-	}
-
-	token := WrapToken{
-		Flags: 0x00, // all zeroed out (this is a token sent by the initiator)
-		// Checksum size: length of output of the HMAC function, in bytes.
-		EC:        uint16(encType.GetHMACBitLength() / 8),
-		RRC:       0,
-		SndSeqNum: 0,
-		Payload:   payload,
-	}
-
-	if err := token.SetCheckSum(key, keyusage.GSSAPI_INITIATOR_SEAL); err != nil {
-		return nil, err
-	}
-
-	return &token, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go
deleted file mode 100644
index 457b89d..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/addrtype/constants.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Package addrtype provides Address type assigned numbers.
-package addrtype
-
-// Address type IDs.
-const (
-	IPv4          int32 = 2
-	Directional   int32 = 3
-	ChaosNet      int32 = 5
-	XNS           int32 = 6
-	ISO           int32 = 7
-	DECNETPhaseIV int32 = 12
-	AppleTalkDDP  int32 = 16
-	NetBios       int32 = 20
-	IPv6          int32 = 24
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go
deleted file mode 100644
index e805b74..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/adtype/constants.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Package adtype provides Authenticator type assigned numbers.
-package adtype
-
-// Authenticator type IDs.
-const (
-	ADIfRelevant                  int32 = 1
-	ADIntendedForServer           int32 = 2
-	ADIntendedForApplicationClass int32 = 3
-	ADKDCIssued                   int32 = 4
-	ADAndOr                       int32 = 5
-	ADMandatoryTicketExtensions   int32 = 6
-	ADInTicketExtensions          int32 = 7
-	ADMandatoryForKDC             int32 = 8
-	OSFDCE                        int32 = 64
-	SESAME                        int32 = 65
-	ADOSFDCEPKICertID             int32 = 66
-	ADAuthenticationStrength      int32 = 70
-	ADFXFastArmor                 int32 = 71
-	ADFXFastUsed                  int32 = 72
-	ADWin2KPAC                    int32 = 128
-	ADEtypeNegotiation            int32 = 129
-	//Reserved values                   9-63
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go
deleted file mode 100644
index d74cd60..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag/constants.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Package asnAppTag provides ASN1 application tag numbers.
-package asnAppTag
-
-// ASN1 application tag numbers.
-const (
-	Ticket         = 1
-	Authenticator  = 2
-	EncTicketPart  = 3
-	ASREQ          = 10
-	TGSREQ         = 12
-	ASREP          = 11
-	TGSREP         = 13
-	APREQ          = 14
-	APREP          = 15
-	KRBSafe        = 20
-	KRBPriv        = 21
-	KRBCred        = 22
-	EncASRepPart   = 25
-	EncTGSRepPart  = 26
-	EncAPRepPart   = 27
-	EncKrbPrivPart = 28
-	EncKrbCredPart = 29
-	KRBError       = 30
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go
deleted file mode 100644
index 93db952..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype/constants.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Package chksumtype provides Kerberos 5 checksum type assigned numbers.
-package chksumtype
-
-// Checksum type IDs.
-const (
-	//RESERVED : 0
-	CRC32         int32 = 1
-	RSA_MD4       int32 = 2
-	RSA_MD4_DES   int32 = 3
-	DES_MAC       int32 = 4
-	DES_MAC_K     int32 = 5
-	RSA_MD4_DES_K int32 = 6
-	RSA_MD5       int32 = 7
-	RSA_MD5_DES   int32 = 8
-	RSA_MD5_DES3  int32 = 9
-	SHA1_ID10     int32 = 10
-	//UNASSIGNED : 11
-	HMAC_SHA1_DES3_KD      int32 = 12
-	HMAC_SHA1_DES3         int32 = 13
-	SHA1_ID14              int32 = 14
-	HMAC_SHA1_96_AES128    int32 = 15
-	HMAC_SHA1_96_AES256    int32 = 16
-	CMAC_CAMELLIA128       int32 = 17
-	CMAC_CAMELLIA256       int32 = 18
-	HMAC_SHA256_128_AES128 int32 = 19
-	HMAC_SHA384_192_AES256 int32 = 20
-	//UNASSIGNED : 21-32770
-	GSSAPI int32 = 32771
-	//UNASSIGNED : 32772-2147483647
-	KERB_CHECKSUM_HMAC_MD5_UNSIGNED uint32 = 4294967158 // 0xFFFFFF76 documentation says this is -138 but in an unsigned int this is 4294967158
-	KERB_CHECKSUM_HMAC_MD5          int32  = -138
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go
deleted file mode 100644
index 0b8e916..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/constants.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Package iana provides Kerberos 5 assigned numbers.
-package iana
-
-// PVNO is the Protocol Version Number.
-const PVNO = 5
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go
deleted file mode 100644
index fd756bc..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/errorcode/constants.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Package errorcode provides Kerberos 5 assigned error codes.
-package errorcode
-
-import "fmt"
-
-// Kerberos error codes.
-const (
-	KDC_ERR_NONE                          int32 = 0  //No error
-	KDC_ERR_NAME_EXP                      int32 = 1  //Client's entry in database has expired
-	KDC_ERR_SERVICE_EXP                   int32 = 2  //Server's entry in database has expired
-	KDC_ERR_BAD_PVNO                      int32 = 3  //Requested protocol version number not supported
-	KDC_ERR_C_OLD_MAST_KVNO               int32 = 4  //Client's key encrypted in old master key
-	KDC_ERR_S_OLD_MAST_KVNO               int32 = 5  //Server's key encrypted in old master key
-	KDC_ERR_C_PRINCIPAL_UNKNOWN           int32 = 6  //Client not found in Kerberos database
-	KDC_ERR_S_PRINCIPAL_UNKNOWN           int32 = 7  //Server not found in Kerberos database
-	KDC_ERR_PRINCIPAL_NOT_UNIQUE          int32 = 8  //Multiple principal entries in database
-	KDC_ERR_NULL_KEY                      int32 = 9  //The client or server has a null key
-	KDC_ERR_CANNOT_POSTDATE               int32 = 10 //Ticket not eligible for  postdating
-	KDC_ERR_NEVER_VALID                   int32 = 11 //Requested starttime is later than end time
-	KDC_ERR_POLICY                        int32 = 12 //KDC policy rejects request
-	KDC_ERR_BADOPTION                     int32 = 13 //KDC cannot accommodate requested option
-	KDC_ERR_ETYPE_NOSUPP                  int32 = 14 //KDC has no support for  encryption type
-	KDC_ERR_SUMTYPE_NOSUPP                int32 = 15 //KDC has no support for  checksum type
-	KDC_ERR_PADATA_TYPE_NOSUPP            int32 = 16 //KDC has no support for  padata type
-	KDC_ERR_TRTYPE_NOSUPP                 int32 = 17 //KDC has no support for  transited type
-	KDC_ERR_CLIENT_REVOKED                int32 = 18 //Clients credentials have been revoked
-	KDC_ERR_SERVICE_REVOKED               int32 = 19 //Credentials for server have been revoked
-	KDC_ERR_TGT_REVOKED                   int32 = 20 //TGT has been revoked
-	KDC_ERR_CLIENT_NOTYET                 int32 = 21 //Client not yet valid; try again later
-	KDC_ERR_SERVICE_NOTYET                int32 = 22 //Server not yet valid; try again later
-	KDC_ERR_KEY_EXPIRED                   int32 = 23 //Password has expired; change password to reset
-	KDC_ERR_PREAUTH_FAILED                int32 = 24 //Pre-authentication information was invalid
-	KDC_ERR_PREAUTH_REQUIRED              int32 = 25 //Additional pre-authentication required
-	KDC_ERR_SERVER_NOMATCH                int32 = 26 //Requested server and ticket don't match
-	KDC_ERR_MUST_USE_USER2USER            int32 = 27 //Server principal valid for  user2user only
-	KDC_ERR_PATH_NOT_ACCEPTED             int32 = 28 //KDC Policy rejects transited path
-	KDC_ERR_SVC_UNAVAILABLE               int32 = 29 //A service is not available
-	KRB_AP_ERR_BAD_INTEGRITY              int32 = 31 //Integrity check on decrypted field failed
-	KRB_AP_ERR_TKT_EXPIRED                int32 = 32 //Ticket expired
-	KRB_AP_ERR_TKT_NYV                    int32 = 33 //Ticket not yet valid
-	KRB_AP_ERR_REPEAT                     int32 = 34 //Request is a replay
-	KRB_AP_ERR_NOT_US                     int32 = 35 //The ticket isn't for us
-	KRB_AP_ERR_BADMATCH                   int32 = 36 //Ticket and authenticator don't match
-	KRB_AP_ERR_SKEW                       int32 = 37 //Clock skew too great
-	KRB_AP_ERR_BADADDR                    int32 = 38 //Incorrect net address
-	KRB_AP_ERR_BADVERSION                 int32 = 39 //Protocol version mismatch
-	KRB_AP_ERR_MSG_TYPE                   int32 = 40 //Invalid msg type
-	KRB_AP_ERR_MODIFIED                   int32 = 41 //Message stream modified
-	KRB_AP_ERR_BADORDER                   int32 = 42 //Message out of order
-	KRB_AP_ERR_BADKEYVER                  int32 = 44 //Specified version of key is not available
-	KRB_AP_ERR_NOKEY                      int32 = 45 //Service key not available
-	KRB_AP_ERR_MUT_FAIL                   int32 = 46 //Mutual authentication failed
-	KRB_AP_ERR_BADDIRECTION               int32 = 47 //Incorrect message direction
-	KRB_AP_ERR_METHOD                     int32 = 48 //Alternative authentication method required
-	KRB_AP_ERR_BADSEQ                     int32 = 49 //Incorrect sequence number in message
-	KRB_AP_ERR_INAPP_CKSUM                int32 = 50 //Inappropriate type of checksum in message
-	KRB_AP_PATH_NOT_ACCEPTED              int32 = 51 //Policy rejects transited path
-	KRB_ERR_RESPONSE_TOO_BIG              int32 = 52 //Response too big for UDP;  retry with TCP
-	KRB_ERR_GENERIC                       int32 = 60 //Generic error (description in e-text)
-	KRB_ERR_FIELD_TOOLONG                 int32 = 61 //Field is too long for this implementation
-	KDC_ERROR_CLIENT_NOT_TRUSTED          int32 = 62 //Reserved for PKINIT
-	KDC_ERROR_KDC_NOT_TRUSTED             int32 = 63 //Reserved for PKINIT
-	KDC_ERROR_INVALID_SIG                 int32 = 64 //Reserved for PKINIT
-	KDC_ERR_KEY_TOO_WEAK                  int32 = 65 //Reserved for PKINIT
-	KDC_ERR_CERTIFICATE_MISMATCH          int32 = 66 //Reserved for PKINIT
-	KRB_AP_ERR_NO_TGT                     int32 = 67 //No TGT available to validate USER-TO-USER
-	KDC_ERR_WRONG_REALM                   int32 = 68 //Reserved for future use
-	KRB_AP_ERR_USER_TO_USER_REQUIRED      int32 = 69 //Ticket must be for  USER-TO-USER
-	KDC_ERR_CANT_VERIFY_CERTIFICATE       int32 = 70 //Reserved for PKINIT
-	KDC_ERR_INVALID_CERTIFICATE           int32 = 71 //Reserved for PKINIT
-	KDC_ERR_REVOKED_CERTIFICATE           int32 = 72 //Reserved for PKINIT
-	KDC_ERR_REVOCATION_STATUS_UNKNOWN     int32 = 73 //Reserved for PKINIT
-	KDC_ERR_REVOCATION_STATUS_UNAVAILABLE int32 = 74 //Reserved for PKINIT
-	KDC_ERR_CLIENT_NAME_MISMATCH          int32 = 75 //Reserved for PKINIT
-	KDC_ERR_KDC_NAME_MISMATCH             int32 = 76 //Reserved for PKINIT
-)
-
-// Lookup an error code description.
-func Lookup(i int32) string {
-	if s, ok := errorcodeLookup[i]; ok {
-		return fmt.Sprintf("(%d) %s", i, s)
-	}
-	return fmt.Sprintf("Unknown ErrorCode %d", i)
-}
-
-var errorcodeLookup = map[int32]string{
-	KDC_ERR_NONE:                          "KDC_ERR_NONE No error",
-	KDC_ERR_NAME_EXP:                      "KDC_ERR_NAME_EXP Client's entry in database has expired",
-	KDC_ERR_SERVICE_EXP:                   "KDC_ERR_SERVICE_EXP Server's entry in database has expired",
-	KDC_ERR_BAD_PVNO:                      "KDC_ERR_BAD_PVNO Requested protocol version number not supported",
-	KDC_ERR_C_OLD_MAST_KVNO:               "KDC_ERR_C_OLD_MAST_KVNO Client's key encrypted in old master key",
-	KDC_ERR_S_OLD_MAST_KVNO:               "KDC_ERR_S_OLD_MAST_KVNO Server's key encrypted in old master key",
-	KDC_ERR_C_PRINCIPAL_UNKNOWN:           "KDC_ERR_C_PRINCIPAL_UNKNOWN Client not found in Kerberos database",
-	KDC_ERR_S_PRINCIPAL_UNKNOWN:           "KDC_ERR_S_PRINCIPAL_UNKNOWN Server not found in Kerberos database",
-	KDC_ERR_PRINCIPAL_NOT_UNIQUE:          "KDC_ERR_PRINCIPAL_NOT_UNIQUE Multiple principal entries in database",
-	KDC_ERR_NULL_KEY:                      "KDC_ERR_NULL_KEY The client or server has a null key",
-	KDC_ERR_CANNOT_POSTDATE:               "KDC_ERR_CANNOT_POSTDATE Ticket not eligible for postdating",
-	KDC_ERR_NEVER_VALID:                   "KDC_ERR_NEVER_VALID Requested starttime is later than end time",
-	KDC_ERR_POLICY:                        "KDC_ERR_POLICY KDC policy rejects request",
-	KDC_ERR_BADOPTION:                     "KDC_ERR_BADOPTION KDC cannot accommodate requested option",
-	KDC_ERR_ETYPE_NOSUPP:                  "KDC_ERR_ETYPE_NOSUPP KDC has no support for encryption type",
-	KDC_ERR_SUMTYPE_NOSUPP:                "KDC_ERR_SUMTYPE_NOSUPP KDC has no support for checksum type",
-	KDC_ERR_PADATA_TYPE_NOSUPP:            "KDC_ERR_PADATA_TYPE_NOSUPP KDC has no support for padata type",
-	KDC_ERR_TRTYPE_NOSUPP:                 "KDC_ERR_TRTYPE_NOSUPP KDC has no support for transited type",
-	KDC_ERR_CLIENT_REVOKED:                "KDC_ERR_CLIENT_REVOKED Clients credentials have been revoked",
-	KDC_ERR_SERVICE_REVOKED:               "KDC_ERR_SERVICE_REVOKED Credentials for server have been revoked",
-	KDC_ERR_TGT_REVOKED:                   "KDC_ERR_TGT_REVOKED TGT has been revoked",
-	KDC_ERR_CLIENT_NOTYET:                 "KDC_ERR_CLIENT_NOTYET Client not yet valid; try again later",
-	KDC_ERR_SERVICE_NOTYET:                "KDC_ERR_SERVICE_NOTYET Server not yet valid; try again later",
-	KDC_ERR_KEY_EXPIRED:                   "KDC_ERR_KEY_EXPIRED Password has expired; change password to reset",
-	KDC_ERR_PREAUTH_FAILED:                "KDC_ERR_PREAUTH_FAILED Pre-authentication information was invalid",
-	KDC_ERR_PREAUTH_REQUIRED:              "KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required",
-	KDC_ERR_SERVER_NOMATCH:                "KDC_ERR_SERVER_NOMATCH Requested server and ticket don't match",
-	KDC_ERR_MUST_USE_USER2USER:            "KDC_ERR_MUST_USE_USER2USER Server principal valid for  user2user only",
-	KDC_ERR_PATH_NOT_ACCEPTED:             "KDC_ERR_PATH_NOT_ACCEPTED KDC Policy rejects transited path",
-	KDC_ERR_SVC_UNAVAILABLE:               "KDC_ERR_SVC_UNAVAILABLE A service is not available",
-	KRB_AP_ERR_BAD_INTEGRITY:              "KRB_AP_ERR_BAD_INTEGRITY Integrity check on decrypted field failed",
-	KRB_AP_ERR_TKT_EXPIRED:                "KRB_AP_ERR_TKT_EXPIRED Ticket expired",
-	KRB_AP_ERR_TKT_NYV:                    "KRB_AP_ERR_TKT_NYV Ticket not yet valid",
-	KRB_AP_ERR_REPEAT:                     "KRB_AP_ERR_REPEAT Request is a replay",
-	KRB_AP_ERR_NOT_US:                     "KRB_AP_ERR_NOT_US The ticket isn't for us",
-	KRB_AP_ERR_BADMATCH:                   "KRB_AP_ERR_BADMATCH Ticket and authenticator don't match",
-	KRB_AP_ERR_SKEW:                       "KRB_AP_ERR_SKEW Clock skew too great",
-	KRB_AP_ERR_BADADDR:                    "KRB_AP_ERR_BADADDR Incorrect net address",
-	KRB_AP_ERR_BADVERSION:                 "KRB_AP_ERR_BADVERSION Protocol version mismatch",
-	KRB_AP_ERR_MSG_TYPE:                   "KRB_AP_ERR_MSG_TYPE Invalid msg type",
-	KRB_AP_ERR_MODIFIED:                   "KRB_AP_ERR_MODIFIED Message stream modified",
-	KRB_AP_ERR_BADORDER:                   "KRB_AP_ERR_BADORDER Message out of order",
-	KRB_AP_ERR_BADKEYVER:                  "KRB_AP_ERR_BADKEYVER Specified version of key is not available",
-	KRB_AP_ERR_NOKEY:                      "KRB_AP_ERR_NOKEY Service key not available",
-	KRB_AP_ERR_MUT_FAIL:                   "KRB_AP_ERR_MUT_FAIL Mutual authentication failed",
-	KRB_AP_ERR_BADDIRECTION:               "KRB_AP_ERR_BADDIRECTION Incorrect message direction",
-	KRB_AP_ERR_METHOD:                     "KRB_AP_ERR_METHOD Alternative authentication method required",
-	KRB_AP_ERR_BADSEQ:                     "KRB_AP_ERR_BADSEQ Incorrect sequence number in message",
-	KRB_AP_ERR_INAPP_CKSUM:                "KRB_AP_ERR_INAPP_CKSUM Inappropriate type of checksum in message",
-	KRB_AP_PATH_NOT_ACCEPTED:              "KRB_AP_PATH_NOT_ACCEPTED Policy rejects transited path",
-	KRB_ERR_RESPONSE_TOO_BIG:              "KRB_ERR_RESPONSE_TOO_BIG Response too big for UDP; retry with TCP",
-	KRB_ERR_GENERIC:                       "KRB_ERR_GENERIC Generic error (description in e-text)",
-	KRB_ERR_FIELD_TOOLONG:                 "KRB_ERR_FIELD_TOOLONG Field is too long for this implementation",
-	KDC_ERROR_CLIENT_NOT_TRUSTED:          "KDC_ERROR_CLIENT_NOT_TRUSTED Reserved for PKINIT",
-	KDC_ERROR_KDC_NOT_TRUSTED:             "KDC_ERROR_KDC_NOT_TRUSTED Reserved for PKINIT",
-	KDC_ERROR_INVALID_SIG:                 "KDC_ERROR_INVALID_SIG Reserved for PKINIT",
-	KDC_ERR_KEY_TOO_WEAK:                  "KDC_ERR_KEY_TOO_WEAK Reserved for PKINIT",
-	KDC_ERR_CERTIFICATE_MISMATCH:          "KDC_ERR_CERTIFICATE_MISMATCH Reserved for PKINIT",
-	KRB_AP_ERR_NO_TGT:                     "KRB_AP_ERR_NO_TGT No TGT available to validate USER-TO-USER",
-	KDC_ERR_WRONG_REALM:                   "KDC_ERR_WRONG_REALM Reserved for future use",
-	KRB_AP_ERR_USER_TO_USER_REQUIRED:      "KRB_AP_ERR_USER_TO_USER_REQUIRED Ticket must be for USER-TO-USER",
-	KDC_ERR_CANT_VERIFY_CERTIFICATE:       "KDC_ERR_CANT_VERIFY_CERTIFICATE Reserved for PKINIT",
-	KDC_ERR_INVALID_CERTIFICATE:           "KDC_ERR_INVALID_CERTIFICATE Reserved for PKINIT",
-	KDC_ERR_REVOKED_CERTIFICATE:           "KDC_ERR_REVOKED_CERTIFICATE Reserved for PKINIT",
-	KDC_ERR_REVOCATION_STATUS_UNKNOWN:     "KDC_ERR_REVOCATION_STATUS_UNKNOWN Reserved for PKINIT",
-	KDC_ERR_REVOCATION_STATUS_UNAVAILABLE: "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE Reserved for PKINIT",
-	KDC_ERR_CLIENT_NAME_MISMATCH:          "KDC_ERR_CLIENT_NAME_MISMATCH Reserved for PKINIT",
-	KDC_ERR_KDC_NAME_MISMATCH:             "KDC_ERR_KDC_NAME_MISMATCH Reserved for PKINIT",
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go
deleted file mode 100644
index 46a0d74..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/etypeID/constants.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Package etypeID provides Kerberos 5 encryption type assigned numbers.
-package etypeID
-
-// Kerberos encryption type assigned numbers.
-const (
-	//RESERVED : 0
-	DES_CBC_CRC                  int32 = 1
-	DES_CBC_MD4                  int32 = 2
-	DES_CBC_MD5                  int32 = 3
-	DES_CBC_RAW                  int32 = 4
-	DES3_CBC_MD5                 int32 = 5
-	DES3_CBC_RAW                 int32 = 6
-	DES3_CBC_SHA1                int32 = 7
-	DES_HMAC_SHA1                int32 = 8
-	DSAWITHSHA1_CMSOID           int32 = 9
-	MD5WITHRSAENCRYPTION_CMSOID  int32 = 10
-	SHA1WITHRSAENCRYPTION_CMSOID int32 = 11
-	RC2CBC_ENVOID                int32 = 12
-	RSAENCRYPTION_ENVOID         int32 = 13
-	RSAES_OAEP_ENV_OID           int32 = 14
-	DES_EDE3_CBC_ENV_OID         int32 = 15
-	DES3_CBC_SHA1_KD             int32 = 16
-	AES128_CTS_HMAC_SHA1_96      int32 = 17
-	AES256_CTS_HMAC_SHA1_96      int32 = 18
-	AES128_CTS_HMAC_SHA256_128   int32 = 19
-	AES256_CTS_HMAC_SHA384_192   int32 = 20
-	//UNASSIGNED : 21-22
-	RC4_HMAC             int32 = 23
-	RC4_HMAC_EXP         int32 = 24
-	CAMELLIA128_CTS_CMAC int32 = 25
-	CAMELLIA256_CTS_CMAC int32 = 26
-	//UNASSIGNED : 27-64
-	SUBKEY_KEYMATERIAL int32 = 65
-	//UNASSIGNED : 66-2147483647
-)
-
-// ETypesByName is a map of EncType names to their assigned EncType number.
-var ETypesByName = map[string]int32{
-	"des-cbc-crc":                  DES_CBC_CRC,
-	"des-cbc-md4":                  DES_CBC_MD4,
-	"des-cbc-md5":                  DES_CBC_MD5,
-	"des-cbc-raw":                  DES_CBC_RAW,
-	"des3-cbc-md5":                 DES3_CBC_MD5,
-	"des3-cbc-raw":                 DES3_CBC_RAW,
-	"des3-cbc-sha1":                DES3_CBC_SHA1,
-	"des3-hmac-sha1":               DES_HMAC_SHA1,
-	"des3-cbc-sha1-kd":             DES3_CBC_SHA1_KD,
-	"des-hmac-sha1":                DES_HMAC_SHA1,
-	"dsaWithSHA1-CmsOID":           DSAWITHSHA1_CMSOID,
-	"md5WithRSAEncryption-CmsOID":  MD5WITHRSAENCRYPTION_CMSOID,
-	"sha1WithRSAEncryption-CmsOID": SHA1WITHRSAENCRYPTION_CMSOID,
-	"rc2CBC-EnvOID":                RC2CBC_ENVOID,
-	"rsaEncryption-EnvOID":         RSAENCRYPTION_ENVOID,
-	"rsaES-OAEP-ENV-OID":           RSAES_OAEP_ENV_OID,
-	"des-ede3-cbc-Env-OID":         DES_EDE3_CBC_ENV_OID,
-	"aes128-cts-hmac-sha1-96":      AES128_CTS_HMAC_SHA1_96,
-	"aes128-cts":                   AES128_CTS_HMAC_SHA1_96,
-	"aes128-sha1":                  AES128_CTS_HMAC_SHA1_96,
-	"aes256-cts-hmac-sha1-96":      AES256_CTS_HMAC_SHA1_96,
-	"aes256-cts":                   AES256_CTS_HMAC_SHA1_96,
-	"aes256-sha1":                  AES256_CTS_HMAC_SHA1_96,
-	"aes128-cts-hmac-sha256-128":   AES128_CTS_HMAC_SHA256_128,
-	"aes128-sha2":                  AES128_CTS_HMAC_SHA256_128,
-	"aes256-cts-hmac-sha384-192":   AES256_CTS_HMAC_SHA384_192,
-	"aes256-sha2":                  AES256_CTS_HMAC_SHA384_192,
-	"arcfour-hmac":                 RC4_HMAC,
-	"rc4-hmac":                     RC4_HMAC,
-	"arcfour-hmac-md5":             RC4_HMAC,
-	"arcfour-hmac-exp":             RC4_HMAC_EXP,
-	"rc4-hmac-exp":                 RC4_HMAC_EXP,
-	"arcfour-hmac-md5-exp":         RC4_HMAC_EXP,
-	"camellia128-cts-cmac":         CAMELLIA128_CTS_CMAC,
-	"camellia128-cts":              CAMELLIA128_CTS_CMAC,
-	"camellia256-cts-cmac":         CAMELLIA256_CTS_CMAC,
-	"camellia256-cts":              CAMELLIA256_CTS_CMAC,
-	"subkey-keymaterial":           SUBKEY_KEYMATERIAL,
-}
-
-// EtypeSupported resolves the etype name string to the etype ID.
-// If zero is returned the etype is not supported by gokrb5.
-func EtypeSupported(etype string) int32 {
-	// Slice of supported enctype IDs
-	s := []int32{
-		AES128_CTS_HMAC_SHA1_96,
-		AES256_CTS_HMAC_SHA1_96,
-		AES128_CTS_HMAC_SHA256_128,
-		AES256_CTS_HMAC_SHA384_192,
-		DES3_CBC_SHA1_KD,
-		RC4_HMAC,
-	}
-	id := ETypesByName[etype]
-	if id == 0 {
-		return id
-	}
-	for _, sid := range s {
-		if id == sid {
-			return id
-		}
-	}
-	return 0
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go
deleted file mode 100644
index 787801f..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/flags/constants.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Package flags provides Kerberos 5 flag assigned numbers.
-package flags
-
-// Flag values for KRB5 messages and tickets.
-const (
-	Reserved               = 0
-	Forwardable            = 1
-	Forwarded              = 2
-	Proxiable              = 3
-	Proxy                  = 4
-	AllowPostDate          = 5
-	MayPostDate            = 5
-	PostDated              = 6
-	Invalid                = 7
-	Renewable              = 8
-	Initial                = 9
-	PreAuthent             = 10
-	HWAuthent              = 11
-	OptHardwareAuth        = 11
-	RequestAnonymous       = 12
-	TransitedPolicyChecked = 12
-	OKAsDelegate           = 13
-	EncPARep               = 15
-	Canonicalize           = 15
-	DisableTransitedCheck  = 26
-	RenewableOK            = 27
-	EncTktInSkey           = 28
-	Renew                  = 30
-	Validate               = 31
-
-	// AP Option Flags
-	// 0 Reserved for future use.
-	APOptionUseSessionKey  = 1
-	APOptionMutualRequired = 2
-	// 3-31 Reserved for future use.
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go
deleted file mode 100644
index 5b232d1..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/keyusage/constants.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Package keyusage provides Kerberos 5 key usage assigned numbers.
-package keyusage
-
-// Key usage numbers.
-const (
-	AS_REQ_PA_ENC_TIMESTAMP                        = 1
-	KDC_REP_TICKET                                 = 2
-	AS_REP_ENCPART                                 = 3
-	TGS_REQ_KDC_REQ_BODY_AUTHDATA_SESSION_KEY      = 4
-	TGS_REQ_KDC_REQ_BODY_AUTHDATA_SUB_KEY          = 5
-	TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM = 6
-	TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR        = 7
-	TGS_REP_ENCPART_SESSION_KEY                    = 8
-	TGS_REP_ENCPART_AUTHENTICATOR_SUB_KEY          = 9
-	AP_REQ_AUTHENTICATOR_CHKSUM                    = 10
-	AP_REQ_AUTHENTICATOR                           = 11
-	AP_REP_ENCPART                                 = 12
-	KRB_PRIV_ENCPART                               = 13
-	KRB_CRED_ENCPART                               = 14
-	KRB_SAFE_CHKSUM                                = 15
-	KERB_NON_KERB_SALT                             = 16
-	KERB_NON_KERB_CKSUM_SALT                       = 17
-	//18.  Reserved for future use in Kerberos and related protocols.
-	AD_KDC_ISSUED_CHKSUM = 19
-	//20-21.  Reserved for future use in Kerberos and related protocols.
-	GSSAPI_ACCEPTOR_SEAL           = 22
-	GSSAPI_ACCEPTOR_SIGN           = 23
-	GSSAPI_INITIATOR_SEAL          = 24
-	GSSAPI_INITIATOR_SIGN          = 25
-	KEY_USAGE_FAST_REQ_CHKSUM      = 50
-	KEY_USAGE_FAST_ENC             = 51
-	KEY_USAGE_FAST_REP             = 52
-	KEY_USAGE_FAST_FINISHED        = 53
-	KEY_USAGE_ENC_CHALLENGE_CLIENT = 54
-	KEY_USAGE_ENC_CHALLENGE_KDC    = 55
-	KEY_USAGE_AS_REQ               = 56
-	//26-511.  Reserved for future use in Kerberos and related protocols.
-	//512-1023.  Reserved for uses internal to a Kerberos implementation.
-	//1024.  Encryption for application use in protocols that do not specify key usage values
-	//1025.  Checksums for application use in protocols that do not specify key usage values
-	//1026-2047.  Reserved for application use.
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go
deleted file mode 100644
index ad21810..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/msgtype/constants.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Package msgtype provides Kerberos 5 message type assigned numbers.
-package msgtype
-
-// KRB message type IDs.
-const (
-	KRB_AS_REQ     = 10 //Request for initial authentication
-	KRB_AS_REP     = 11 //Response to KRB_AS_REQ request
-	KRB_TGS_REQ    = 12 //Request for authentication based on TGT
-	KRB_TGS_REP    = 13 //Response to KRB_TGS_REQ request
-	KRB_AP_REQ     = 14 //Application request to server
-	KRB_AP_REP     = 15 //Response to KRB_AP_REQ_MUTUAL
-	KRB_RESERVED16 = 16 //Reserved for user-to-user krb_tgt_request
-	KRB_RESERVED17 = 17 //Reserved for user-to-user krb_tgt_reply
-	KRB_SAFE       = 20 // Safe (checksummed) application message
-	KRB_PRIV       = 21 // Private (encrypted) application message
-	KRB_CRED       = 22 //Private (encrypted) message to forward credentials
-	KRB_ERROR      = 30 //Error response
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go
deleted file mode 100644
index c111a05..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/nametype/constants.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Package nametype provides Kerberos 5 principal name type numbers.
-package nametype
-
-// Kerberos name type IDs.
-const (
-	KRB_NT_UNKNOWN        int32 = 0  //Name type not known
-	KRB_NT_PRINCIPAL      int32 = 1  //Just the name of the principal as in DCE,  or for users
-	KRB_NT_SRV_INST       int32 = 2  //Service and other unique instance (krbtgt)
-	KRB_NT_SRV_HST        int32 = 3  //Service with host name as instance (telnet, rcommands)
-	KRB_NT_SRV_XHST       int32 = 4  //Service with host as remaining components
-	KRB_NT_UID            int32 = 5  //Unique ID
-	KRB_NT_X500_PRINCIPAL int32 = 6  //Encoded X.509 Distinguished name [RFC2253]
-	KRB_NT_SMTP_NAME      int32 = 7  //Name in form of SMTP email name (e.g., user@example.com)
-	KRB_NT_ENTERPRISE     int32 = 10 //Enterprise name; may be mapped to principal name
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go
deleted file mode 100644
index aa04f63..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/iana/patype/constants.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Package patype provides Kerberos 5 pre-authentication type assigned numbers.
-package patype
-
-// Kerberos pre-authentication type assigned numbers.
-const (
-	PA_TGS_REQ       int32 = 1
-	PA_ENC_TIMESTAMP int32 = 2
-	PA_PW_SALT       int32 = 3
-	//RESERVED : 4
-	PA_ENC_UNIX_TIME       int32 = 5
-	PA_SANDIA_SECUREID     int32 = 6
-	PA_SESAME              int32 = 7
-	PA_OSF_DCE             int32 = 8
-	PA_CYBERSAFE_SECUREID  int32 = 9
-	PA_AFS3_SALT           int32 = 10
-	PA_ETYPE_INFO          int32 = 11
-	PA_SAM_CHALLENGE       int32 = 12
-	PA_SAM_RESPONSE        int32 = 13
-	PA_PK_AS_REQ_OLD       int32 = 14
-	PA_PK_AS_REP_OLD       int32 = 15
-	PA_PK_AS_REQ           int32 = 16
-	PA_PK_AS_REP           int32 = 17
-	PA_PK_OCSP_RESPONSE    int32 = 18
-	PA_ETYPE_INFO2         int32 = 19
-	PA_USE_SPECIFIED_KVNO  int32 = 20
-	PA_SVR_REFERRAL_INFO   int32 = 20
-	PA_SAM_REDIRECT        int32 = 21
-	PA_GET_FROM_TYPED_DATA int32 = 22
-	TD_PADATA              int32 = 22
-	PA_SAM_ETYPE_INFO      int32 = 23
-	PA_ALT_PRINC           int32 = 24
-	PA_SERVER_REFERRAL     int32 = 25
-	//UNASSIGNED : 26-29
-	PA_SAM_CHALLENGE2 int32 = 30
-	PA_SAM_RESPONSE2  int32 = 31
-	//UNASSIGNED : 32-40
-	PA_EXTRA_TGT int32 = 41
-	//UNASSIGNED : 42-100
-	TD_PKINIT_CMS_CERTIFICATES int32 = 101
-	TD_KRB_PRINCIPAL           int32 = 102
-	TD_KRB_REALM               int32 = 103
-	TD_TRUSTED_CERTIFIERS      int32 = 104
-	TD_CERTIFICATE_INDEX       int32 = 105
-	TD_APP_DEFINED_ERROR       int32 = 106
-	TD_REQ_NONCE               int32 = 107
-	TD_REQ_SEQ                 int32 = 108
-	TD_DH_PARAMETERS           int32 = 109
-	//UNASSIGNED : 110
-	TD_CMS_DIGEST_ALGORITHMS  int32 = 111
-	TD_CERT_DIGEST_ALGORITHMS int32 = 112
-	//UNASSIGNED : 113-127
-	PA_PAC_REQUEST         int32 = 128
-	PA_FOR_USER            int32 = 129
-	PA_FOR_X509_USER       int32 = 130
-	PA_FOR_CHECK_DUPS      int32 = 131
-	PA_AS_CHECKSUM         int32 = 132
-	PA_FX_COOKIE           int32 = 133
-	PA_AUTHENTICATION_SET  int32 = 134
-	PA_AUTH_SET_SELECTED   int32 = 135
-	PA_FX_FAST             int32 = 136
-	PA_FX_ERROR            int32 = 137
-	PA_ENCRYPTED_CHALLENGE int32 = 138
-	//UNASSIGNED : 139-140
-	PA_OTP_CHALLENGE  int32 = 141
-	PA_OTP_REQUEST    int32 = 142
-	PA_OTP_CONFIRM    int32 = 143
-	PA_OTP_PIN_CHANGE int32 = 144
-	PA_EPAK_AS_REQ    int32 = 145
-	PA_EPAK_AS_REP    int32 = 146
-	PA_PKINIT_KX      int32 = 147
-	PA_PKU2U_NAME     int32 = 148
-	PA_REQ_ENC_PA_REP int32 = 149
-	PA_AS_FRESHNESS   int32 = 150
-	//UNASSIGNED : 151-164
-	PA_SUPPORTED_ETYPES int32 = 165
-	PA_EXTENDED_ERROR   int32 = 166
-)
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go
deleted file mode 100644
index a3e2efd..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/changepasswddata.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package kadmin
-
-import (
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// ChangePasswdData is the payload to a password change message.
-type ChangePasswdData struct {
-	NewPasswd []byte              `asn1:"explicit,tag:0"`
-	TargName  types.PrincipalName `asn1:"explicit,optional,tag:1"`
-	TargRealm string              `asn1:"generalstring,optional,explicit,tag:2"`
-}
-
-// Marshal ChangePasswdData into a byte slice.
-func (c *ChangePasswdData) Marshal() ([]byte, error) {
-	b, err := asn1.Marshal(*c)
-	if err != nil {
-		return []byte{}, err
-	}
-	//b = asn1tools.AddASNAppTag(b, asnAppTag.)
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go
deleted file mode 100644
index 157fcad..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/message.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package kadmin
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-const (
-	verisonHex = "ff80"
-)
-
-// Request message for changing password.
-type Request struct {
-	APREQ   messages.APReq
-	KRBPriv messages.KRBPriv
-}
-
-// Reply message for a password change.
-type Reply struct {
-	MessageLength int
-	Version       int
-	APREPLength   int
-	APREP         messages.APRep
-	KRBPriv       messages.KRBPriv
-	KRBError      messages.KRBError
-	IsKRBError    bool
-	ResultCode    uint16
-	Result        string
-}
-
-// Marshal a Request into a byte slice.
-func (m *Request) Marshal() (b []byte, err error) {
-	b = []byte{255, 128} // protocol version number: contains the hex constant 0xff80 (big-endian integer).
-	ab, e := m.APREQ.Marshal()
-	if e != nil {
-		err = fmt.Errorf("error marshaling AP_REQ: %v", e)
-		return
-	}
-	if len(ab) > math.MaxUint16 {
-		err = errors.New("length of AP_REQ greater then max Uint16 size")
-		return
-	}
-	al := make([]byte, 2)
-	binary.BigEndian.PutUint16(al, uint16(len(ab)))
-	b = append(b, al...)
-	b = append(b, ab...)
-	pb, e := m.KRBPriv.Marshal()
-	if e != nil {
-		err = fmt.Errorf("error marshaling KRB_Priv: %v", e)
-		return
-	}
-	b = append(b, pb...)
-	if len(b)+2 > math.MaxUint16 {
-		err = errors.New("length of message greater then max Uint16 size")
-		return
-	}
-	ml := make([]byte, 2)
-	binary.BigEndian.PutUint16(ml, uint16(len(b)+2))
-	b = append(ml, b...)
-	return
-}
-
-// Unmarshal a byte slice into a Reply.
-func (m *Reply) Unmarshal(b []byte) error {
-	m.MessageLength = int(binary.BigEndian.Uint16(b[0:2]))
-	m.Version = int(binary.BigEndian.Uint16(b[2:4]))
-	if m.Version != 1 {
-		return fmt.Errorf("kadmin reply has incorrect protocol version number: %d", m.Version)
-	}
-	m.APREPLength = int(binary.BigEndian.Uint16(b[4:6]))
-	if m.APREPLength != 0 {
-		err := m.APREP.Unmarshal(b[6 : 6+m.APREPLength])
-		if err != nil {
-			return err
-		}
-		err = m.KRBPriv.Unmarshal(b[6+m.APREPLength : m.MessageLength])
-		if err != nil {
-			return err
-		}
-	} else {
-		m.IsKRBError = true
-		m.KRBError.Unmarshal(b[6:m.MessageLength])
-		m.ResultCode, m.Result = parseResponse(m.KRBError.EData)
-	}
-	return nil
-}
-
-func parseResponse(b []byte) (c uint16, s string) {
-	c = binary.BigEndian.Uint16(b[0:2])
-	buf := bytes.NewBuffer(b[2:])
-	m := make([]byte, len(b)-2)
-	binary.Read(buf, binary.BigEndian, &m)
-	s = string(m)
-	return
-}
-
-// Decrypt the encrypted part of the KRBError within the change password Reply.
-func (m *Reply) Decrypt(key types.EncryptionKey) error {
-	if m.IsKRBError {
-		return m.KRBError
-	}
-	err := m.KRBPriv.DecryptEncPart(key)
-	if err != nil {
-		return err
-	}
-	m.ResultCode, m.Result = parseResponse(m.KRBPriv.DecryptedEncPart.UserData)
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go
deleted file mode 100644
index 2a7491a..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/kadmin/passwd.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Package kadmin provides Kerberos administration capabilities.
-package kadmin
-
-import (
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/messages"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// ChangePasswdMsg generate a change password request and also return the key needed to decrypt the reply.
-func ChangePasswdMsg(cname types.PrincipalName, realm, password string, tkt messages.Ticket, sessionKey types.EncryptionKey) (r Request, k types.EncryptionKey, err error) {
-	// Create change password data struct and marshal to bytes
-	chgpasswd := ChangePasswdData{
-		NewPasswd: []byte(password),
-		TargName:  cname,
-		TargRealm: realm,
-	}
-	chpwdb, err := chgpasswd.Marshal()
-	if err != nil {
-		err = krberror.Errorf(err, krberror.KRBMsgError, "error marshaling change passwd data")
-		return
-	}
-
-	// Generate authenticator
-	auth, err := types.NewAuthenticator(realm, cname)
-	if err != nil {
-		err = krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator")
-		return
-	}
-	etype, err := crypto.GetEtype(sessionKey.KeyType)
-	if err != nil {
-		err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey etype")
-		return
-	}
-	err = auth.GenerateSeqNumberAndSubKey(etype.GetETypeID(), etype.GetKeyByteSize())
-	if err != nil {
-		err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey")
-		return
-	}
-	k = auth.SubKey
-
-	// Generate AP_REQ
-	APreq, err := messages.NewAPReq(tkt, sessionKey, auth)
-	if err != nil {
-		return
-	}
-
-	// Form the KRBPriv encpart data
-	kp := messages.EncKrbPrivPart{
-		UserData:       chpwdb,
-		Timestamp:      auth.CTime,
-		Usec:           auth.Cusec,
-		SequenceNumber: auth.SeqNumber,
-	}
-	kpriv := messages.NewKRBPriv(kp)
-	err = kpriv.EncryptEncPart(k)
-	if err != nil {
-		err = krberror.Errorf(err, krberror.EncryptingError, "error encrypting change passwd data")
-		return
-	}
-
-	r = Request{
-		APREQ:   APreq,
-		KRBPriv: kpriv,
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go
deleted file mode 100644
index 0c7fc38..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/keytab/keytab.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Package keytab implements Kerberos keytabs: https://web.mit.edu/kerberos/krb5-devel/doc/formats/keytab_file_format.html.
-package keytab
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"time"
-	"unsafe"
-
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-const (
-	keytabFirstByte byte = 05
-)
-
-// Keytab struct.
-type Keytab struct {
-	version uint8
-	Entries []entry
-}
-
-// Keytab entry struct.
-type entry struct {
-	Principal principal
-	Timestamp time.Time
-	KVNO8     uint8
-	Key       types.EncryptionKey
-	KVNO      uint32
-}
-
-// Keytab entry principal struct.
-type principal struct {
-	NumComponents int16
-	Realm         string
-	Components    []string
-	NameType      int32
-}
-
-// New creates new, empty Keytab type.
-func New() *Keytab {
-	var e []entry
-	return &Keytab{
-		version: 0,
-		Entries: e,
-	}
-}
-
-// GetEncryptionKey returns the EncryptionKey from the Keytab for the newest entry with the required kvno, etype and matching principal.
-func (kt *Keytab) GetEncryptionKey(princName types.PrincipalName, realm string, kvno int, etype int32) (types.EncryptionKey, error) {
-	//TODO (theme: KVNO from keytab) this function should return the kvno too
-	var key types.EncryptionKey
-	var t time.Time
-	for _, k := range kt.Entries {
-		if k.Principal.Realm == realm && len(k.Principal.Components) == len(princName.NameString) &&
-			k.Key.KeyType == etype &&
-			(k.KVNO == uint32(kvno) || kvno == 0) &&
-			k.Timestamp.After(t) {
-			p := true
-			for i, n := range k.Principal.Components {
-				if princName.NameString[i] != n {
-					p = false
-					break
-				}
-			}
-			if p {
-				key = k.Key
-				t = k.Timestamp
-			}
-		}
-	}
-	if len(key.KeyValue) < 1 {
-		return key, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype)
-	}
-	return key, nil
-}
-
-// Create a new Keytab entry.
-func newKeytabEntry() entry {
-	var b []byte
-	return entry{
-		Principal: newPrincipal(),
-		Timestamp: time.Time{},
-		KVNO8:     0,
-		Key: types.EncryptionKey{
-			KeyType:  0,
-			KeyValue: b,
-		},
-		KVNO: 0,
-	}
-}
-
-// Create a new principal.
-func newPrincipal() principal {
-	var c []string
-	return principal{
-		NumComponents: 0,
-		Realm:         "",
-		Components:    c,
-		NameType:      0,
-	}
-}
-
-// Load a Keytab file into a Keytab type.
-func Load(ktPath string) (*Keytab, error) {
-	kt := new(Keytab)
-	b, err := ioutil.ReadFile(ktPath)
-	if err != nil {
-		return kt, err
-	}
-	err = kt.Unmarshal(b)
-	return kt, err
-}
-
-// Marshal keytab into byte slice
-func (kt *Keytab) Marshal() ([]byte, error) {
-	b := []byte{keytabFirstByte, kt.version}
-	for _, e := range kt.Entries {
-		eb, err := e.marshal(int(kt.version))
-		if err != nil {
-			return b, err
-		}
-		b = append(b, eb...)
-	}
-	return b, nil
-}
-
-// Write the keytab bytes to io.Writer.
-// Returns the number of bytes written
-func (kt *Keytab) Write(w io.Writer) (int, error) {
-	b, err := kt.Marshal()
-	if err != nil {
-		return 0, fmt.Errorf("error marshaling keytab: %v", err)
-	}
-	return w.Write(b)
-}
-
-// Unmarshal byte slice of Keytab data into Keytab type.
-func (kt *Keytab) Unmarshal(b []byte) error {
-	//The first byte of the file always has the value 5
-	if b[0] != keytabFirstByte {
-		return errors.New("invalid keytab data. First byte does not equal 5")
-	}
-	//Get keytab version
-	//The 2nd byte contains the version number (1 or 2)
-	kt.version = b[1]
-	if kt.version != 1 && kt.version != 2 {
-		return errors.New("invalid keytab data. Keytab version is neither 1 nor 2")
-	}
-	//Version 1 of the file format uses native byte order for integer representations. Version 2 always uses big-endian byte order
-	var endian binary.ByteOrder
-	endian = binary.BigEndian
-	if kt.version == 1 && isNativeEndianLittle() {
-		endian = binary.LittleEndian
-	}
-	/*
-		After the two-byte version indicator, the file contains a sequence of signed 32-bit record lengths followed by key records or holes.
-		A positive record length indicates a valid key entry whose size is equal to or less than the record length.
-		A negative length indicates a zero-filled hole whose size is the inverse of the length.
-		A length of 0 indicates the end of the file.
-	*/
-	// n tracks position in the byte array
-	n := 2
-	l := readInt32(b, &n, &endian)
-	for l != 0 {
-		if l < 0 {
-			//Zero padded so skip over
-			l = l * -1
-			n = n + int(l)
-		} else {
-			//fmt.Printf("Bytes for entry: %v\n", b[n:n+int(l)])
-			eb := b[n : n+int(l)]
-			n = n + int(l)
-			ke := newKeytabEntry()
-			// p keeps track as to where we are in the byte stream
-			var p int
-			parsePrincipal(eb, &p, kt, &ke, &endian)
-			ke.Timestamp = readTimestamp(eb, &p, &endian)
-			ke.KVNO8 = uint8(readInt8(eb, &p, &endian))
-			ke.Key.KeyType = int32(readInt16(eb, &p, &endian))
-			kl := int(readInt16(eb, &p, &endian))
-			ke.Key.KeyValue = readBytes(eb, &p, kl, &endian)
-			//The 32-bit key version overrides the 8-bit key version.
-			// To determine if it is present, the implementation must check that at least 4 bytes remain in the record after the other fields are read,
-			// and that the value of the 32-bit integer contained in those bytes is non-zero.
-			if len(eb)-p >= 4 {
-				// The 32-bit key may be present
-				ke.KVNO = uint32(readInt32(eb, &p, &endian))
-			}
-			if ke.KVNO == 0 {
-				// Handles if the value from the last 4 bytes was zero and also if there are not the 4 bytes present. Makes sense to put the same value here as KVNO8
-				ke.KVNO = uint32(ke.KVNO8)
-			}
-			// Add the entry to the keytab
-			kt.Entries = append(kt.Entries, ke)
-		}
-		// Check if there are still 4 bytes left to read
-		if n > len(b) || len(b[n:]) < 4 {
-			break
-		}
-		// Read the size of the next entry
-		l = readInt32(b, &n, &endian)
-	}
-	return nil
-}
-
-func (e entry) marshal(v int) ([]byte, error) {
-	var b []byte
-	pb, err := e.Principal.marshal(v)
-	if err != nil {
-		return b, err
-	}
-	b = append(b, pb...)
-
-	var endian binary.ByteOrder
-	endian = binary.BigEndian
-	if v == 1 && isNativeEndianLittle() {
-		endian = binary.LittleEndian
-	}
-
-	t := make([]byte, 9)
-	endian.PutUint32(t[0:4], uint32(e.Timestamp.Unix()))
-	t[4] = e.KVNO8
-	endian.PutUint16(t[5:7], uint16(e.Key.KeyType))
-	endian.PutUint16(t[7:9], uint16(len(e.Key.KeyValue)))
-	b = append(b, t...)
-
-	buf := new(bytes.Buffer)
-	err = binary.Write(buf, endian, e.Key.KeyValue)
-	if err != nil {
-		return b, err
-	}
-	b = append(b, buf.Bytes()...)
-
-	t = make([]byte, 4)
-	endian.PutUint32(t, e.KVNO)
-	b = append(b, t...)
-
-	// Add the length header
-	t = make([]byte, 4)
-	endian.PutUint32(t, uint32(len(b)))
-	b = append(t, b...)
-	return b, nil
-}
-
-// Parse the Keytab bytes of a principal into a Keytab entry's principal.
-func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error {
-	ke.Principal.NumComponents = readInt16(b, p, e)
-	if kt.version == 1 {
-		//In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2
-		ke.Principal.NumComponents--
-	}
-	lenRealm := readInt16(b, p, e)
-	ke.Principal.Realm = string(readBytes(b, p, int(lenRealm), e))
-	for i := 0; i < int(ke.Principal.NumComponents); i++ {
-		l := readInt16(b, p, e)
-		ke.Principal.Components = append(ke.Principal.Components, string(readBytes(b, p, int(l), e)))
-	}
-	if kt.version != 1 {
-		//Name Type is omitted in version 1
-		ke.Principal.NameType = readInt32(b, p, e)
-	}
-	return nil
-}
-
-func (p principal) marshal(v int) ([]byte, error) {
-	//var b []byte
-	b := make([]byte, 2)
-	var endian binary.ByteOrder
-	endian = binary.BigEndian
-	if v == 1 && isNativeEndianLittle() {
-		endian = binary.LittleEndian
-	}
-	endian.PutUint16(b[0:], uint16(p.NumComponents))
-	realm, err := marshalString(p.Realm, v)
-	if err != nil {
-		return b, err
-	}
-	b = append(b, realm...)
-	for _, c := range p.Components {
-		cb, err := marshalString(c, v)
-		if err != nil {
-			return b, err
-		}
-		b = append(b, cb...)
-	}
-	if v != 1 {
-		t := make([]byte, 4)
-		endian.PutUint32(t, uint32(p.NameType))
-		b = append(b, t...)
-	}
-	return b, nil
-}
-
-func marshalString(s string, v int) ([]byte, error) {
-	sb := []byte(s)
-	b := make([]byte, 2)
-	var endian binary.ByteOrder
-	endian = binary.BigEndian
-	if v == 1 && isNativeEndianLittle() {
-		endian = binary.LittleEndian
-	}
-	endian.PutUint16(b[0:], uint16(len(sb)))
-	buf := new(bytes.Buffer)
-	err := binary.Write(buf, endian, sb)
-	if err != nil {
-		return b, err
-	}
-	b = append(b, buf.Bytes()...)
-	return b, err
-}
-
-// Read bytes representing a timestamp.
-func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time {
-	return time.Unix(int64(readInt32(b, p, e)), 0)
-}
-
-// Read bytes representing an eight bit integer.
-func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) {
-	buf := bytes.NewBuffer(b[*p : *p+1])
-	binary.Read(buf, *e, &i)
-	*p++
-	return
-}
-
-// Read bytes representing a sixteen bit integer.
-func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) {
-	buf := bytes.NewBuffer(b[*p : *p+2])
-	binary.Read(buf, *e, &i)
-	*p += 2
-	return
-}
-
-// Read bytes representing a thirty two bit integer.
-func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) {
-	buf := bytes.NewBuffer(b[*p : *p+4])
-	binary.Read(buf, *e, &i)
-	*p += 4
-	return
-}
-
-func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte {
-	buf := bytes.NewBuffer(b[*p : *p+s])
-	r := make([]byte, s)
-	binary.Read(buf, *e, &r)
-	*p += s
-	return r
-}
-
-func isNativeEndianLittle() bool {
-	var x = 0x012345678
-	var p = unsafe.Pointer(&x)
-	var bp = (*[4]byte)(p)
-
-	var endian bool
-	if 0x01 == bp[0] {
-		endian = false
-	} else if (0x78 & 0xff) == (bp[0] & 0xff) {
-		endian = true
-	} else {
-		// Default to big endian
-		endian = false
-	}
-	return endian
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go
deleted file mode 100644
index d591bde..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/krberror/error.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Package krberror provides error type and functions for gokrb5.
-package krberror
-
-import (
-	"fmt"
-	"strings"
-)
-
-// Error type descriptions.
-const (
-	separator       = " < "
-	EncodingError   = "Encoding_Error"
-	NetworkingError = "Networking_Error"
-	DecryptingError = "Decrypting_Error"
-	EncryptingError = "Encrypting_Error"
-	ChksumError     = "Checksum_Error"
-	KRBMsgError     = "KRBMessage_Handling_Error"
-	ConfigError     = "Configuration_Error"
-	KDCError        = "KDC_Error"
-)
-
-// Krberror is an error type for gokrb5
-type Krberror struct {
-	RootCause string
-	EText     []string
-}
-
-// Error function to implement the error interface.
-func (e Krberror) Error() string {
-	return fmt.Sprintf("[Root cause: %s] ", e.RootCause) + strings.Join(e.EText, separator)
-}
-
-// Add another error statement to the error.
-func (e *Krberror) Add(et string, s string) {
-	e.EText = append([]string{fmt.Sprintf("%s: %s", et, s)}, e.EText...)
-}
-
-// NewKrberror creates a new instance of Krberror.
-func NewKrberror(et, s string) Krberror {
-	return Krberror{
-		RootCause: et,
-		EText:     []string{s},
-	}
-}
-
-// Errorf appends to or creates a new Krberror.
-func Errorf(err error, et, format string, a ...interface{}) Krberror {
-	if e, ok := err.(Krberror); ok {
-		e.Add(et, fmt.Sprintf(format, a...))
-		return e
-	}
-	return NewErrorf(et, format+": %s", append(a, err)...)
-}
-
-// NewErrorf creates a new Krberror from a formatted string.
-func NewErrorf(et, format string, a ...interface{}) Krberror {
-	var s string
-	if len(a) > 0 {
-		s = fmt.Sprintf("%s: %s", et, fmt.Sprintf(format, a...))
-	} else {
-		s = fmt.Sprintf("%s: %s", et, format)
-	}
-	return Krberror{
-		RootCause: et,
-		EText:     []string{s},
-	}
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go
deleted file mode 100644
index 9c244f0..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APRep.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package messages
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-/*
-AP-REP          ::= [APPLICATION 15] SEQUENCE {
-pvno            [0] INTEGER (5),
-msg-type        [1] INTEGER (15),
-enc-part        [2] EncryptedData -- EncAPRepPart
-}
-
-EncAPRepPart    ::= [APPLICATION 27] SEQUENCE {
-        ctime           [0] KerberosTime,
-        cusec           [1] Microseconds,
-        subkey          [2] EncryptionKey OPTIONAL,
-        seq-number      [3] UInt32 OPTIONAL
-}
-*/
-
-// APRep implements RFC 4120 KRB_AP_REP: https://tools.ietf.org/html/rfc4120#section-5.5.2.
-type APRep struct {
-	PVNO    int                 `asn1:"explicit,tag:0"`
-	MsgType int                 `asn1:"explicit,tag:1"`
-	EncPart types.EncryptedData `asn1:"explicit,tag:2"`
-}
-
-// EncAPRepPart is the encrypted part of KRB_AP_REP.
-type EncAPRepPart struct {
-	CTime          time.Time           `asn1:"generalized,explicit,tag:0"`
-	Cusec          int                 `asn1:"explicit,tag:1"`
-	Subkey         types.EncryptionKey `asn1:"optional,explicit,tag:2"`
-	SequenceNumber int64               `asn1:"optional,explicit,tag:3"`
-}
-
-// Unmarshal bytes b into the APRep struct.
-func (a *APRep) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREP))
-	if err != nil {
-		return processUnmarshalReplyError(b, err)
-	}
-	expectedMsgType := msgtype.KRB_AP_REP
-	if a.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_AP_REP. Expected: %v; Actual: %v", expectedMsgType, a.MsgType)
-	}
-	return nil
-}
-
-// Unmarshal bytes b into the APRep encrypted part struct.
-func (a *EncAPRepPart) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncAPRepPart))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "AP_REP unmarshal error")
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go
deleted file mode 100644
index e1ed4ae..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/APReq.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package messages
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/errorcode"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/keytab"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-/*AP-REQ          ::= [APPLICATION 14] SEQUENCE {
-pvno            [0] INTEGER (5),
-msg-type        [1] INTEGER (14),
-ap-options      [2] APOptions,
-ticket          [3] Ticket,
-authenticator   [4] EncryptedData -- Authenticator
-}
-
-APOptions       ::= KerberosFlags
--- reserved(0),
--- use-session-key(1),
--- mutual-required(2)*/
-
-type marshalAPReq struct {
-	PVNO      int            `asn1:"explicit,tag:0"`
-	MsgType   int            `asn1:"explicit,tag:1"`
-	APOptions asn1.BitString `asn1:"explicit,tag:2"`
-	// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
-	Ticket                 asn1.RawValue       `asn1:"explicit,tag:3"`
-	EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"`
-}
-
-// APReq implements RFC 4120 KRB_AP_REQ: https://tools.ietf.org/html/rfc4120#section-5.5.1.
-type APReq struct {
-	PVNO                   int                 `asn1:"explicit,tag:0"`
-	MsgType                int                 `asn1:"explicit,tag:1"`
-	APOptions              asn1.BitString      `asn1:"explicit,tag:2"`
-	Ticket                 Ticket              `asn1:"explicit,tag:3"`
-	EncryptedAuthenticator types.EncryptedData `asn1:"explicit,tag:4"`
-	Authenticator          types.Authenticator `asn1:"optional"`
-}
-
-// NewAPReq generates a new KRB_AP_REQ struct.
-func NewAPReq(tkt Ticket, sessionKey types.EncryptionKey, auth types.Authenticator) (APReq, error) {
-	var a APReq
-	ed, err := encryptAuthenticator(auth, sessionKey, tkt)
-	if err != nil {
-		return a, krberror.Errorf(err, krberror.KRBMsgError, "error creating Authenticator for AP_REQ")
-	}
-	a = APReq{
-		PVNO:                   iana.PVNO,
-		MsgType:                msgtype.KRB_AP_REQ,
-		APOptions:              types.NewKrbFlags(),
-		Ticket:                 tkt,
-		EncryptedAuthenticator: ed,
-	}
-	return a, nil
-}
-
-// Encrypt Authenticator
-func encryptAuthenticator(a types.Authenticator, sessionKey types.EncryptionKey, tkt Ticket) (types.EncryptedData, error) {
-	var ed types.EncryptedData
-	m, err := a.Marshal()
-	if err != nil {
-		return ed, krberror.Errorf(err, krberror.EncodingError, "marshaling error of EncryptedData form of Authenticator")
-	}
-	usage := authenticatorKeyUsage(tkt.SName)
-	ed, err = crypto.GetEncryptedData(m, sessionKey, uint32(usage), tkt.EncPart.KVNO)
-	if err != nil {
-		return ed, krberror.Errorf(err, krberror.EncryptingError, "error encrypting Authenticator")
-	}
-	return ed, nil
-}
-
-// DecryptAuthenticator decrypts the Authenticator within the AP_REQ.
-// sessionKey may simply be the key within the decrypted EncPart of the ticket within the AP_REQ.
-func (a *APReq) DecryptAuthenticator(sessionKey types.EncryptionKey) error {
-	usage := authenticatorKeyUsage(a.Ticket.SName)
-	ab, e := crypto.DecryptEncPart(a.EncryptedAuthenticator, sessionKey, uint32(usage))
-	if e != nil {
-		return fmt.Errorf("error decrypting authenticator: %v", e)
-	}
-	err := a.Authenticator.Unmarshal(ab)
-	if err != nil {
-		return fmt.Errorf("error unmarshaling authenticator: %v", err)
-	}
-	return nil
-}
-
-func authenticatorKeyUsage(pn types.PrincipalName) int {
-	if pn.NameString[0] == "krbtgt" {
-		return keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR
-	}
-	return keyusage.AP_REQ_AUTHENTICATOR
-}
-
-// Unmarshal bytes b into the APReq struct.
-func (a *APReq) Unmarshal(b []byte) error {
-	var m marshalAPReq
-	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREQ))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "unmarshal error of AP_REQ")
-	}
-	if m.MsgType != msgtype.KRB_AP_REQ {
-		return NewKRBError(types.PrincipalName{}, "", errorcode.KRB_AP_ERR_MSG_TYPE, errorcode.Lookup(errorcode.KRB_AP_ERR_MSG_TYPE))
-	}
-	a.PVNO = m.PVNO
-	a.MsgType = m.MsgType
-	a.APOptions = m.APOptions
-	a.EncryptedAuthenticator = m.EncryptedAuthenticator
-	a.Ticket, err = unmarshalTicket(m.Ticket.Bytes)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "unmarshaling error of Ticket within AP_REQ")
-	}
-	return nil
-}
-
-// Marshal APReq struct.
-func (a *APReq) Marshal() ([]byte, error) {
-	m := marshalAPReq{
-		PVNO:                   a.PVNO,
-		MsgType:                a.MsgType,
-		APOptions:              a.APOptions,
-		EncryptedAuthenticator: a.EncryptedAuthenticator,
-	}
-	var b []byte
-	b, err := a.Ticket.Marshal()
-	if err != nil {
-		return b, err
-	}
-	m.Ticket = asn1.RawValue{
-		Class:      asn1.ClassContextSpecific,
-		IsCompound: true,
-		Tag:        3,
-		Bytes:      b,
-	}
-	mk, err := asn1.Marshal(m)
-	if err != nil {
-		return mk, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AP_REQ")
-	}
-	mk = asn1tools.AddASNAppTag(mk, asnAppTag.APREQ)
-	return mk, nil
-}
-
-// Verify an AP_REQ using service's keytab, spn and max acceptable clock skew duration.
-// The service ticket encrypted part and authenticator will be decrypted as part of this operation.
-func (a *APReq) Verify(kt *keytab.Keytab, d time.Duration, cAddr types.HostAddress) (bool, error) {
-	// Decrypt ticket's encrypted part with service key
-	//TODO decrypt with service's session key from its TGT is use-to-user. Need to figure out how to get TGT.
-	//if types.IsFlagSet(&a.APOptions, flags.APOptionUseSessionKey) {
-	//	//If the USE-SESSION-KEY flag is set in the ap-options field, it indicates to
-	//	//the server that user-to-user authentication is in use, and that the ticket
-	//	//is encrypted in the session key from the server's TGT rather than in the server's secret key.
-	//	err := a.Ticket.Decrypt(tgt.DecryptedEncPart.Key)
-	//	if err != nil {
-	//		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of ticket provided using session key")
-	//	}
-	//} else {
-	//	// Because it is possible for the server to be registered in multiple
-	//	// realms, with different keys in each, the srealm field in the
-	//	// unencrypted portion of the ticket in the KRB_AP_REQ is used to
-	//	// specify which secret key the server should use to decrypt that
-	//	// ticket.The KRB_AP_ERR_NOKEY error code is returned if the server
-	//	// doesn't have the proper key to decipher the ticket.
-	//	// The ticket is decrypted using the version of the server's key
-	//	// specified by the ticket.
-	//	err := a.Ticket.DecryptEncPart(*kt, &a.Ticket.SName)
-	//	if err != nil {
-	//		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided")
-	//	}
-	//}
-	err := a.Ticket.DecryptEncPart(kt, &a.Ticket.SName)
-	if err != nil {
-		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting encpart of service ticket provided")
-	}
-
-	// Check time validity of ticket
-	ok, err := a.Ticket.Valid(d)
-	if err != nil || !ok {
-		return ok, err
-	}
-
-	// Check client's address is listed in the client addresses in the ticket
-	if len(a.Ticket.DecryptedEncPart.CAddr) > 0 {
-		//The addresses in the ticket (if any) are then searched for an address matching the operating-system reported
-		//address of the client.  If no match is found or the server insists on ticket addresses but none are present in
-		//the ticket, the KRB_AP_ERR_BADADDR error is returned.
-		if !types.HostAddressesContains(a.Ticket.DecryptedEncPart.CAddr, cAddr) {
-			return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADADDR, "client address not within the list contained in the service ticket")
-		}
-	}
-
-	// Decrypt authenticator with session key from ticket's encrypted part
-	err = a.DecryptAuthenticator(a.Ticket.DecryptedEncPart.Key)
-	if err != nil {
-		return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BAD_INTEGRITY, "could not decrypt authenticator")
-	}
-
-	// Check CName in authenticator is the same as that in the ticket
-	if !a.Authenticator.CName.Equal(a.Ticket.DecryptedEncPart.CName) {
-		return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_BADMATCH, "CName in Authenticator does not match that in service ticket")
-	}
-
-	// Check the clock skew between the client and the service server
-	ct := a.Authenticator.CTime.Add(time.Duration(a.Authenticator.Cusec) * time.Microsecond)
-	t := time.Now().UTC()
-	if t.Sub(ct) > d || ct.Sub(t) > d {
-		return false, NewKRBError(a.Ticket.SName, a.Ticket.Realm, errorcode.KRB_AP_ERR_SKEW, fmt.Sprintf("clock skew with client too large. greater than %v seconds", d))
-	}
-	return true, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go
deleted file mode 100644
index 76c89c3..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCRep.go
+++ /dev/null
@@ -1,312 +0,0 @@
-package messages
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.4.2
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/config"
-	"gopkg.in/jcmturner/gokrb5.v7/credentials"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/flags"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/patype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-type marshalKDCRep struct {
-	PVNO    int                  `asn1:"explicit,tag:0"`
-	MsgType int                  `asn1:"explicit,tag:1"`
-	PAData  types.PADataSequence `asn1:"explicit,optional,tag:2"`
-	CRealm  string               `asn1:"generalstring,explicit,tag:3"`
-	CName   types.PrincipalName  `asn1:"explicit,tag:4"`
-	// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
-	Ticket  asn1.RawValue       `asn1:"explicit,tag:5"`
-	EncPart types.EncryptedData `asn1:"explicit,tag:6"`
-}
-
-// KDCRepFields represents the KRB_KDC_REP fields.
-type KDCRepFields struct {
-	PVNO             int
-	MsgType          int
-	PAData           []types.PAData
-	CRealm           string
-	CName            types.PrincipalName
-	Ticket           Ticket
-	EncPart          types.EncryptedData
-	DecryptedEncPart EncKDCRepPart
-}
-
-// ASRep implements RFC 4120 KRB_AS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2.
-type ASRep struct {
-	KDCRepFields
-}
-
-// TGSRep implements RFC 4120 KRB_TGS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2.
-type TGSRep struct {
-	KDCRepFields
-}
-
-// EncKDCRepPart is the encrypted part of KRB_KDC_REP.
-type EncKDCRepPart struct {
-	Key           types.EncryptionKey  `asn1:"explicit,tag:0"`
-	LastReqs      []LastReq            `asn1:"explicit,tag:1"`
-	Nonce         int                  `asn1:"explicit,tag:2"`
-	KeyExpiration time.Time            `asn1:"generalized,explicit,optional,tag:3"`
-	Flags         asn1.BitString       `asn1:"explicit,tag:4"`
-	AuthTime      time.Time            `asn1:"generalized,explicit,tag:5"`
-	StartTime     time.Time            `asn1:"generalized,explicit,optional,tag:6"`
-	EndTime       time.Time            `asn1:"generalized,explicit,tag:7"`
-	RenewTill     time.Time            `asn1:"generalized,explicit,optional,tag:8"`
-	SRealm        string               `asn1:"generalstring,explicit,tag:9"`
-	SName         types.PrincipalName  `asn1:"explicit,tag:10"`
-	CAddr         []types.HostAddress  `asn1:"explicit,optional,tag:11"`
-	EncPAData     types.PADataSequence `asn1:"explicit,optional,tag:12"`
-}
-
-// LastReq part of KRB_KDC_REP.
-type LastReq struct {
-	LRType  int32     `asn1:"explicit,tag:0"`
-	LRValue time.Time `asn1:"generalized,explicit,tag:1"`
-}
-
-// Unmarshal bytes b into the ASRep struct.
-func (k *ASRep) Unmarshal(b []byte) error {
-	var m marshalKDCRep
-	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREP))
-	if err != nil {
-		return processUnmarshalReplyError(b, err)
-	}
-	if m.MsgType != msgtype.KRB_AS_REP {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REP. Expected: %v; Actual: %v", msgtype.KRB_AS_REP, m.MsgType)
-	}
-	//Process the raw ticket within
-	tkt, err := unmarshalTicket(m.Ticket.Bytes)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within AS_REP")
-	}
-	k.KDCRepFields = KDCRepFields{
-		PVNO:    m.PVNO,
-		MsgType: m.MsgType,
-		PAData:  m.PAData,
-		CRealm:  m.CRealm,
-		CName:   m.CName,
-		Ticket:  tkt,
-		EncPart: m.EncPart,
-	}
-	return nil
-}
-
-// Unmarshal bytes b into the TGSRep struct.
-func (k *TGSRep) Unmarshal(b []byte) error {
-	var m marshalKDCRep
-	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREP))
-	if err != nil {
-		return processUnmarshalReplyError(b, err)
-	}
-	if m.MsgType != msgtype.KRB_TGS_REP {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an TGS_REP. Expected: %v; Actual: %v", msgtype.KRB_TGS_REP, m.MsgType)
-	}
-	//Process the raw ticket within
-	tkt, err := unmarshalTicket(m.Ticket.Bytes)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within TGS_REP")
-	}
-	k.KDCRepFields = KDCRepFields{
-		PVNO:    m.PVNO,
-		MsgType: m.MsgType,
-		PAData:  m.PAData,
-		CRealm:  m.CRealm,
-		CName:   m.CName,
-		Ticket:  tkt,
-		EncPart: m.EncPart,
-	}
-	return nil
-}
-
-// Unmarshal bytes b into encrypted part of KRB_KDC_REP.
-func (e *EncKDCRepPart) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncASRepPart))
-	if err != nil {
-		// Try using tag 26
-		/* Ref: RFC 4120
-		Compatibility note: Some implementations unconditionally send an
-		encrypted EncTGSRepPart (application tag number 26) in this field
-		regardless of whether the reply is a AS-REP or a TGS-REP.  In the
-		interest of compatibility, implementors MAY relax the check on the
-		tag number of the decrypted ENC-PART.*/
-		_, err = asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncTGSRepPart))
-		if err != nil {
-			return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part within KDC_REP")
-		}
-	}
-	return nil
-}
-
-// DecryptEncPart decrypts the encrypted part of an AS_REP.
-func (k *ASRep) DecryptEncPart(c *credentials.Credentials) (types.EncryptionKey, error) {
-	var key types.EncryptionKey
-	var err error
-	if c.HasKeytab() {
-		key, err = c.Keytab().GetEncryptionKey(k.CName, k.CRealm, k.EncPart.KVNO, k.EncPart.EType)
-		if err != nil {
-			return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part")
-		}
-	}
-	if c.HasPassword() {
-		key, _, err = crypto.GetKeyFromPassword(c.Password(), k.CName, k.CRealm, k.EncPart.EType, k.PAData)
-		if err != nil {
-			return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part")
-		}
-	}
-	if !c.HasKeytab() && !c.HasPassword() {
-		return key, krberror.NewErrorf(krberror.DecryptingError, "no secret available in credentials to perform decryption of AS_REP encrypted part")
-	}
-	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.AS_REP_ENCPART)
-	if err != nil {
-		return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part")
-	}
-	var denc EncKDCRepPart
-	err = denc.Unmarshal(b)
-	if err != nil {
-		return key, krberror.Errorf(err, krberror.EncodingError, "error unmarshaling decrypted encpart of AS_REP")
-	}
-	k.DecryptedEncPart = denc
-	return key, nil
-}
-
-// Verify checks the validity of AS_REP message.
-func (k *ASRep) Verify(cfg *config.Config, creds *credentials.Credentials, asReq ASReq) (bool, error) {
-	//Ref RFC 4120 Section 3.1.5
-	if k.CName.NameType != asReq.ReqBody.CName.NameType || k.CName.NameString == nil {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName)
-	}
-	for i := range k.CName.NameString {
-		if k.CName.NameString[i] != asReq.ReqBody.CName.NameString[i] {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName)
-		}
-	}
-	if k.CRealm != asReq.ReqBody.Realm {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "CRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.CRealm)
-	}
-	key, err := k.DecryptEncPart(creds)
-	if err != nil {
-		return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting EncPart of AS_REP")
-	}
-	if k.DecryptedEncPart.Nonce != asReq.ReqBody.Nonce {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request")
-	}
-	if k.DecryptedEncPart.SName.NameType != asReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", asReq.ReqBody.SName, k.DecryptedEncPart.SName)
-	}
-	for i := range k.CName.NameString {
-		if k.DecryptedEncPart.SName.NameString[i] != asReq.ReqBody.SName.NameString[i] {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.SName, k.DecryptedEncPart.SName)
-		}
-	}
-	if k.DecryptedEncPart.SRealm != asReq.ReqBody.Realm {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.DecryptedEncPart.SRealm)
-	}
-	if len(asReq.ReqBody.Addresses) > 0 {
-		if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, asReq.ReqBody.Addresses) {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the AS_REP does not match those listed in the AS_REQ")
-		}
-	}
-	t := time.Now().UTC()
-	if t.Sub(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(t) > cfg.LibDefaults.Clockskew {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds", cfg.LibDefaults.Clockskew.Seconds())
-	}
-	// RFC 6806 https://tools.ietf.org/html/rfc6806.html#section-11
-	if asReq.PAData.Contains(patype.PA_REQ_ENC_PA_REP) && types.IsFlagSet(&k.DecryptedEncPart.Flags, flags.EncPARep) {
-		if len(k.DecryptedEncPart.EncPAData) < 2 || !k.DecryptedEncPart.EncPAData.Contains(patype.PA_FX_FAST) {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "KDC did not respond appropriately to FAST negotiation")
-		}
-		for _, pa := range k.DecryptedEncPart.EncPAData {
-			if pa.PADataType == patype.PA_REQ_ENC_PA_REP {
-				var pafast types.PAReqEncPARep
-				err := pafast.Unmarshal(pa.PADataValue)
-				if err != nil {
-					return false, krberror.Errorf(err, krberror.EncodingError, "KDC FAST negotiation response error, could not unmarshal PA_REQ_ENC_PA_REP")
-				}
-				etype, err := crypto.GetChksumEtype(pafast.ChksumType)
-				if err != nil {
-					return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response error")
-				}
-				ab, _ := asReq.Marshal()
-				if !etype.VerifyChecksum(key.KeyValue, ab, pafast.Chksum, keyusage.KEY_USAGE_AS_REQ) {
-					return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response checksum invalid")
-				}
-			}
-		}
-	}
-	return true, nil
-}
-
-// DecryptEncPart decrypts the encrypted part of an TGS_REP.
-func (k *TGSRep) DecryptEncPart(key types.EncryptionKey) error {
-	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.TGS_REP_ENCPART_SESSION_KEY)
-	if err != nil {
-		return krberror.Errorf(err, krberror.DecryptingError, "error decrypting TGS_REP EncPart")
-	}
-	var denc EncKDCRepPart
-	err = denc.Unmarshal(b)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part")
-	}
-	k.DecryptedEncPart = denc
-	return nil
-}
-
-// Verify checks the validity of the TGS_REP message.
-func (k *TGSRep) Verify(cfg *config.Config, tgsReq TGSReq) (bool, error) {
-	if k.CName.NameType != tgsReq.ReqBody.CName.NameType || k.CName.NameString == nil {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "CName type in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName)
-	}
-	for i := range k.CName.NameString {
-		if k.CName.NameString[i] != tgsReq.ReqBody.CName.NameString[i] {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName)
-		}
-	}
-	if k.Ticket.Realm != tgsReq.ReqBody.Realm {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "realm in response ticket does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.Ticket.Realm)
-	}
-	if k.DecryptedEncPart.Nonce != tgsReq.ReqBody.Nonce {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request")
-	}
-	//if k.Ticket.SName.NameType != tgsReq.ReqBody.SName.NameType || k.Ticket.SName.NameString == nil {
-	//	return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.Ticket.SName)
-	//}
-	//for i := range k.Ticket.SName.NameString {
-	//	if k.Ticket.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] {
-	//		return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.Ticket.SName)
-	//	}
-	//}
-	//if k.DecryptedEncPart.SName.NameType != tgsReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil {
-	//	return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName)
-	//}
-	//for i := range k.DecryptedEncPart.SName.NameString {
-	//	if k.DecryptedEncPart.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] {
-	//		return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName)
-	//	}
-	//}
-	if k.DecryptedEncPart.SRealm != tgsReq.ReqBody.Realm {
-		return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.DecryptedEncPart.SRealm)
-	}
-	if len(k.DecryptedEncPart.CAddr) > 0 {
-		if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, tgsReq.ReqBody.Addresses) {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the TGS_REP does not match those listed in the TGS_REQ")
-		}
-	}
-	if time.Since(k.DecryptedEncPart.StartTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.StartTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew {
-		if time.Since(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew {
-			return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds.", cfg.LibDefaults.Clockskew.Seconds())
-		}
-	}
-	return true, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go
deleted file mode 100644
index f75ddc4..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KDCReq.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package messages
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.4.1
-
-import (
-	"crypto/rand"
-	"fmt"
-	"math"
-	"math/big"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/config"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/flags"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/patype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-type marshalKDCReq struct {
-	PVNO    int                  `asn1:"explicit,tag:1"`
-	MsgType int                  `asn1:"explicit,tag:2"`
-	PAData  types.PADataSequence `asn1:"explicit,optional,tag:3"`
-	ReqBody asn1.RawValue        `asn1:"explicit,tag:4"`
-}
-
-// KDCReqFields represents the KRB_KDC_REQ fields.
-type KDCReqFields struct {
-	PVNO    int
-	MsgType int
-	PAData  types.PADataSequence
-	ReqBody KDCReqBody
-	Renewal bool
-}
-
-// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
-type ASReq struct {
-	KDCReqFields
-}
-
-// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1.
-type TGSReq struct {
-	KDCReqFields
-}
-
-type marshalKDCReqBody struct {
-	KDCOptions  asn1.BitString      `asn1:"explicit,tag:0"`
-	CName       types.PrincipalName `asn1:"explicit,optional,tag:1"`
-	Realm       string              `asn1:"generalstring,explicit,tag:2"`
-	SName       types.PrincipalName `asn1:"explicit,optional,tag:3"`
-	From        time.Time           `asn1:"generalized,explicit,optional,tag:4"`
-	Till        time.Time           `asn1:"generalized,explicit,tag:5"`
-	RTime       time.Time           `asn1:"generalized,explicit,optional,tag:6"`
-	Nonce       int                 `asn1:"explicit,tag:7"`
-	EType       []int32             `asn1:"explicit,tag:8"`
-	Addresses   []types.HostAddress `asn1:"explicit,optional,tag:9"`
-	EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"`
-	// Ticket needs to be a raw value as it is wrapped in an APPLICATION tag
-	AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"`
-}
-
-// KDCReqBody implements the KRB_KDC_REQ request body.
-type KDCReqBody struct {
-	KDCOptions        asn1.BitString      `asn1:"explicit,tag:0"`
-	CName             types.PrincipalName `asn1:"explicit,optional,tag:1"`
-	Realm             string              `asn1:"generalstring,explicit,tag:2"`
-	SName             types.PrincipalName `asn1:"explicit,optional,tag:3"`
-	From              time.Time           `asn1:"generalized,explicit,optional,tag:4"`
-	Till              time.Time           `asn1:"generalized,explicit,tag:5"`
-	RTime             time.Time           `asn1:"generalized,explicit,optional,tag:6"`
-	Nonce             int                 `asn1:"explicit,tag:7"`
-	EType             []int32             `asn1:"explicit,tag:8"`
-	Addresses         []types.HostAddress `asn1:"explicit,optional,tag:9"`
-	EncAuthData       types.EncryptedData `asn1:"explicit,optional,tag:10"`
-	AdditionalTickets []Ticket            `asn1:"explicit,optional,tag:11"`
-}
-
-// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request.
-func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
-	sname := types.PrincipalName{
-		NameType:   nametype.KRB_NT_SRV_INST,
-		NameString: []string{"krbtgt", realm},
-	}
-	return NewASReq(realm, c, cname, sname)
-}
-
-// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request.
-func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) {
-	sname := types.PrincipalName{
-		NameType:   nametype.KRB_NT_PRINCIPAL,
-		NameString: []string{"kadmin", "changepw"},
-	}
-	return NewASReq(realm, c, cname, sname)
-}
-
-// NewASReq generates a new KRB_AS_REQ struct for a given SNAME.
-func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) {
-	nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
-	if err != nil {
-		return ASReq{}, err
-	}
-	t := time.Now().UTC()
-	// Copy the default options to make this thread safe
-	kopts := types.NewKrbFlags()
-	copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes)
-	kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength
-	a := ASReq{
-		KDCReqFields{
-			PVNO:    iana.PVNO,
-			MsgType: msgtype.KRB_AS_REQ,
-			PAData:  types.PADataSequence{},
-			ReqBody: KDCReqBody{
-				KDCOptions: kopts,
-				Realm:      realm,
-				CName:      cname,
-				SName:      sname,
-				Till:       t.Add(c.LibDefaults.TicketLifetime),
-				Nonce:      int(nonce.Int64()),
-				EType:      c.LibDefaults.DefaultTktEnctypeIDs,
-			},
-		},
-	}
-	if c.LibDefaults.Forwardable {
-		types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable)
-	}
-	if c.LibDefaults.Canonicalize {
-		types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize)
-	}
-	if c.LibDefaults.Proxiable {
-		types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable)
-	}
-	if c.LibDefaults.RenewLifetime != 0 {
-		types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable)
-		a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
-		a.ReqBody.RTime = t.Add(time.Duration(48) * time.Hour)
-	}
-	if !c.LibDefaults.NoAddresses {
-		ha, err := types.LocalHostAddresses()
-		if err != nil {
-			return a, fmt.Errorf("could not get local addresses: %v", err)
-		}
-		ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
-		a.ReqBody.Addresses = ha
-	}
-	return a, nil
-}
-
-// NewTGSReq generates a new KRB_TGS_REQ struct.
-func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tgt Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool) (TGSReq, error) {
-	a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
-	if err != nil {
-		return a, err
-	}
-	err = a.setPAData(tgt, sessionKey)
-	return a, err
-}
-
-// NewUser2UserTGSReq returns a TGS-REQ suitable for user-to-user authentication (https://tools.ietf.org/html/rfc4120#section-3.7)
-func NewUser2UserTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, clientTGT Ticket, sessionKey types.EncryptionKey, sname types.PrincipalName, renewal bool, verifyingTGT Ticket) (TGSReq, error) {
-	a, err := tgsReq(cname, sname, kdcRealm, renewal, c)
-	if err != nil {
-		return a, err
-	}
-	a.ReqBody.AdditionalTickets = []Ticket{verifyingTGT}
-	types.SetFlag(&a.ReqBody.KDCOptions, flags.EncTktInSkey)
-	err = a.setPAData(clientTGT, sessionKey)
-	return a, err
-}
-
-// tgsReq populates the fields for a TGS_REQ
-func tgsReq(cname, sname types.PrincipalName, kdcRealm string, renewal bool, c *config.Config) (TGSReq, error) {
-	nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
-	if err != nil {
-		return TGSReq{}, err
-	}
-	t := time.Now().UTC()
-	k := KDCReqFields{
-		PVNO:    iana.PVNO,
-		MsgType: msgtype.KRB_TGS_REQ,
-		ReqBody: KDCReqBody{
-			KDCOptions: types.NewKrbFlags(),
-			Realm:      kdcRealm,
-			CName:      cname, // Add the CName to make validation of the reply easier
-			SName:      sname,
-			Till:       t.Add(c.LibDefaults.TicketLifetime),
-			Nonce:      int(nonce.Int64()),
-			EType:      c.LibDefaults.DefaultTGSEnctypeIDs,
-		},
-		Renewal: renewal,
-	}
-	if c.LibDefaults.Forwardable {
-		types.SetFlag(&k.ReqBody.KDCOptions, flags.Forwardable)
-	}
-	if c.LibDefaults.Canonicalize {
-		types.SetFlag(&k.ReqBody.KDCOptions, flags.Canonicalize)
-	}
-	if c.LibDefaults.Proxiable {
-		types.SetFlag(&k.ReqBody.KDCOptions, flags.Proxiable)
-	}
-	if c.LibDefaults.RenewLifetime > time.Duration(0) {
-		types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
-		k.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime)
-	}
-	if !c.LibDefaults.NoAddresses {
-		ha, err := types.LocalHostAddresses()
-		if err != nil {
-			return TGSReq{}, fmt.Errorf("could not get local addresses: %v", err)
-		}
-		ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...)
-		k.ReqBody.Addresses = ha
-	}
-	if renewal {
-		types.SetFlag(&k.ReqBody.KDCOptions, flags.Renew)
-		types.SetFlag(&k.ReqBody.KDCOptions, flags.Renewable)
-	}
-	return TGSReq{
-		k,
-	}, nil
-}
-
-func (k *TGSReq) setPAData(tgt Ticket, sessionKey types.EncryptionKey) error {
-	// Marshal the request and calculate checksum
-	b, err := k.ReqBody.Marshal()
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body")
-	}
-	etype, err := crypto.GetEtype(sessionKey.KeyType)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator")
-	}
-	cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM)
-	if err != nil {
-		return krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash")
-	}
-
-	// Form PAData for TGS_REQ
-	// Create authenticator
-	auth, err := types.NewAuthenticator(tgt.Realm, k.ReqBody.CName)
-	if err != nil {
-		return krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator")
-	}
-	auth.Cksum = types.Checksum{
-		CksumType: etype.GetHashID(),
-		Checksum:  cb,
-	}
-	// Create AP_REQ
-	apReq, err := NewAPReq(tgt, sessionKey, auth)
-	if err != nil {
-		return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ")
-	}
-	apb, err := apReq.Marshal()
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data")
-	}
-	k.PAData = types.PADataSequence{
-		types.PAData{
-			PADataType:  patype.PA_TGS_REQ,
-			PADataValue: apb,
-		},
-	}
-	return nil
-}
-
-// Unmarshal bytes b into the ASReq struct.
-func (k *ASReq) Unmarshal(b []byte) error {
-	var m marshalKDCReq
-	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ")
-	}
-	expectedMsgType := msgtype.KRB_AS_REQ
-	if m.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a AS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
-	}
-	var reqb KDCReqBody
-	err = reqb.Unmarshal(m.ReqBody.Bytes)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body")
-	}
-	k.MsgType = m.MsgType
-	k.PAData = m.PAData
-	k.PVNO = m.PVNO
-	k.ReqBody = reqb
-	return nil
-}
-
-// Unmarshal bytes b into the TGSReq struct.
-func (k *TGSReq) Unmarshal(b []byte) error {
-	var m marshalKDCReq
-	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ")
-	}
-	expectedMsgType := msgtype.KRB_TGS_REQ
-	if m.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
-	}
-	var reqb KDCReqBody
-	err = reqb.Unmarshal(m.ReqBody.Bytes)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body")
-	}
-	k.MsgType = m.MsgType
-	k.PAData = m.PAData
-	k.PVNO = m.PVNO
-	k.ReqBody = reqb
-	return nil
-}
-
-// Unmarshal bytes b into the KRB_KDC_REQ body struct.
-func (k *KDCReqBody) Unmarshal(b []byte) error {
-	var m marshalKDCReqBody
-	_, err := asn1.Unmarshal(b, &m)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body")
-	}
-	k.KDCOptions = m.KDCOptions
-	if len(k.KDCOptions.Bytes) < 4 {
-		tb := make([]byte, 4-len(k.KDCOptions.Bytes))
-		k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...)
-		k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8
-	}
-	k.CName = m.CName
-	k.Realm = m.Realm
-	k.SName = m.SName
-	k.From = m.From
-	k.Till = m.Till
-	k.RTime = m.RTime
-	k.Nonce = m.Nonce
-	k.EType = m.EType
-	k.Addresses = m.Addresses
-	k.EncAuthData = m.EncAuthData
-	if len(m.AdditionalTickets.Bytes) > 0 {
-		k.AdditionalTickets, err = unmarshalTicketsSequence(m.AdditionalTickets)
-		if err != nil {
-			return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets")
-		}
-	}
-	return nil
-}
-
-// Marshal ASReq struct.
-func (k *ASReq) Marshal() ([]byte, error) {
-	m := marshalKDCReq{
-		PVNO:    k.PVNO,
-		MsgType: k.MsgType,
-		PAData:  k.PAData,
-	}
-	b, err := k.ReqBody.Marshal()
-	if err != nil {
-		var mk []byte
-		return mk, err
-	}
-	m.ReqBody = asn1.RawValue{
-		Class:      asn1.ClassContextSpecific,
-		IsCompound: true,
-		Tag:        4,
-		Bytes:      b,
-	}
-	mk, err := asn1.Marshal(m)
-	if err != nil {
-		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ")
-	}
-	mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ)
-	return mk, nil
-}
-
-// Marshal TGSReq struct.
-func (k *TGSReq) Marshal() ([]byte, error) {
-	m := marshalKDCReq{
-		PVNO:    k.PVNO,
-		MsgType: k.MsgType,
-		PAData:  k.PAData,
-	}
-	b, err := k.ReqBody.Marshal()
-	if err != nil {
-		var mk []byte
-		return mk, err
-	}
-	m.ReqBody = asn1.RawValue{
-		Class:      asn1.ClassContextSpecific,
-		IsCompound: true,
-		Tag:        4,
-		Bytes:      b,
-	}
-	mk, err := asn1.Marshal(m)
-	if err != nil {
-		return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ")
-	}
-	mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ)
-	return mk, nil
-}
-
-// Marshal KRB_KDC_REQ body struct.
-func (k *KDCReqBody) Marshal() ([]byte, error) {
-	var b []byte
-	m := marshalKDCReqBody{
-		KDCOptions:  k.KDCOptions,
-		CName:       k.CName,
-		Realm:       k.Realm,
-		SName:       k.SName,
-		From:        k.From,
-		Till:        k.Till,
-		RTime:       k.RTime,
-		Nonce:       k.Nonce,
-		EType:       k.EType,
-		Addresses:   k.Addresses,
-		EncAuthData: k.EncAuthData,
-	}
-	rawtkts, err := MarshalTicketSequence(k.AdditionalTickets)
-	if err != nil {
-		return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets")
-	}
-	//The asn1.rawValue needs the tag setting on it for where it is in the KDCReqBody
-	rawtkts.Tag = 11
-	if len(rawtkts.Bytes) > 0 {
-		m.AdditionalTickets = rawtkts
-	}
-	b, err = asn1.Marshal(m)
-	if err != nil {
-		return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body")
-	}
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go
deleted file mode 100644
index 380cf80..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBCred.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package messages
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-type marshalKRBCred struct {
-	PVNO    int                 `asn1:"explicit,tag:0"`
-	MsgType int                 `asn1:"explicit,tag:1"`
-	Tickets asn1.RawValue       `asn1:"explicit,tag:2"`
-	EncPart types.EncryptedData `asn1:"explicit,tag:3"`
-}
-
-// KRBCred implements RFC 4120 KRB_CRED: https://tools.ietf.org/html/rfc4120#section-5.8.1.
-type KRBCred struct {
-	PVNO             int
-	MsgType          int
-	Tickets          []Ticket
-	EncPart          types.EncryptedData
-	DecryptedEncPart EncKrbCredPart
-}
-
-// EncKrbCredPart is the encrypted part of KRB_CRED.
-type EncKrbCredPart struct {
-	TicketInfo []KrbCredInfo     `asn1:"explicit,tag:0"`
-	Nouce      int               `asn1:"optional,explicit,tag:1"`
-	Timestamp  time.Time         `asn1:"generalized,optional,explicit,tag:2"`
-	Usec       int               `asn1:"optional,explicit,tag:3"`
-	SAddress   types.HostAddress `asn1:"optional,explicit,tag:4"`
-	RAddress   types.HostAddress `asn1:"optional,explicit,tag:5"`
-}
-
-// KrbCredInfo is the KRB_CRED_INFO part of KRB_CRED.
-type KrbCredInfo struct {
-	Key       types.EncryptionKey `asn1:"explicit,tag:0"`
-	PRealm    string              `asn1:"generalstring,optional,explicit,tag:1"`
-	PName     types.PrincipalName `asn1:"optional,explicit,tag:2"`
-	Flags     asn1.BitString      `asn1:"optional,explicit,tag:3"`
-	AuthTime  time.Time           `asn1:"generalized,optional,explicit,tag:4"`
-	StartTime time.Time           `asn1:"generalized,optional,explicit,tag:5"`
-	EndTime   time.Time           `asn1:"generalized,optional,explicit,tag:6"`
-	RenewTill time.Time           `asn1:"generalized,optional,explicit,tag:7"`
-	SRealm    string              `asn1:"optional,explicit,ia5,tag:8"`
-	SName     types.PrincipalName `asn1:"optional,explicit,tag:9"`
-	CAddr     types.HostAddresses `asn1:"optional,explicit,tag:10"`
-}
-
-// Unmarshal bytes b into the KRBCred struct.
-func (k *KRBCred) Unmarshal(b []byte) error {
-	var m marshalKRBCred
-	_, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBCred))
-	if err != nil {
-		return processUnmarshalReplyError(b, err)
-	}
-	expectedMsgType := msgtype.KRB_CRED
-	if m.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_CRED. Expected: %v; Actual: %v", expectedMsgType, m.MsgType)
-	}
-	k.PVNO = m.PVNO
-	k.MsgType = m.MsgType
-	k.EncPart = m.EncPart
-	if len(m.Tickets.Bytes) > 0 {
-		k.Tickets, err = unmarshalTicketsSequence(m.Tickets)
-		if err != nil {
-			return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling tickets within KRB_CRED")
-		}
-	}
-	return nil
-}
-
-// DecryptEncPart decrypts the encrypted part of a KRB_CRED.
-func (k *KRBCred) DecryptEncPart(key types.EncryptionKey) error {
-	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_CRED_ENCPART)
-	if err != nil {
-		return krberror.Errorf(err, krberror.DecryptingError, "error decrypting KRB_CRED EncPart")
-	}
-	var denc EncKrbCredPart
-	err = denc.Unmarshal(b)
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part of KRB_CRED")
-	}
-	k.DecryptedEncPart = denc
-	return nil
-}
-
-// Unmarshal bytes b into the encrypted part of KRB_CRED.
-func (k *EncKrbCredPart) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbCredPart))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling EncKrbCredPart")
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go
deleted file mode 100644
index 5aa9def..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBError.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Package messages implements Kerberos 5 message types and methods.
-package messages
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/iana"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/errorcode"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1.
-type KRBError struct {
-	PVNO      int                 `asn1:"explicit,tag:0"`
-	MsgType   int                 `asn1:"explicit,tag:1"`
-	CTime     time.Time           `asn1:"generalized,optional,explicit,tag:2"`
-	Cusec     int                 `asn1:"optional,explicit,tag:3"`
-	STime     time.Time           `asn1:"generalized,explicit,tag:4"`
-	Susec     int                 `asn1:"explicit,tag:5"`
-	ErrorCode int32               `asn1:"explicit,tag:6"`
-	CRealm    string              `asn1:"generalstring,optional,explicit,tag:7"`
-	CName     types.PrincipalName `asn1:"optional,explicit,tag:8"`
-	Realm     string              `asn1:"generalstring,explicit,tag:9"`
-	SName     types.PrincipalName `asn1:"explicit,tag:10"`
-	EText     string              `asn1:"generalstring,optional,explicit,tag:11"`
-	EData     []byte              `asn1:"optional,explicit,tag:12"`
-}
-
-// NewKRBError creates a new KRBError.
-func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError {
-	t := time.Now().UTC()
-	return KRBError{
-		PVNO:      iana.PVNO,
-		MsgType:   msgtype.KRB_ERROR,
-		STime:     t,
-		Susec:     int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
-		ErrorCode: code,
-		SName:     sname,
-		Realm:     realm,
-		EText:     etext,
-	}
-}
-
-// Unmarshal bytes b into the KRBError struct.
-func (k *KRBError) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error")
-	}
-	expectedMsgType := msgtype.KRB_ERROR
-	if k.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType)
-	}
-	return nil
-}
-
-// Error method implementing error interface on KRBError struct.
-func (k KRBError) Error() string {
-	etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode))
-	if k.EText != "" {
-		etxt = fmt.Sprintf("%s - %s", etxt, k.EText)
-	}
-	return etxt
-}
-
-func processUnmarshalReplyError(b []byte, err error) error {
-	switch err.(type) {
-	case asn1.StructuralError:
-		var krberr KRBError
-		tmperr := krberr.Unmarshal(b)
-		if tmperr != nil {
-			return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply")
-		}
-		return krberr
-	default:
-		return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply")
-	}
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go
deleted file mode 100644
index ebc5d3d..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBPriv.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package messages
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// KRBPriv implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.7.1.
-type KRBPriv struct {
-	PVNO             int                 `asn1:"explicit,tag:0"`
-	MsgType          int                 `asn1:"explicit,tag:1"`
-	EncPart          types.EncryptedData `asn1:"explicit,tag:3"`
-	DecryptedEncPart EncKrbPrivPart      `asn1:"optional,omitempty"` // Not part of ASN1 bytes so marked as optional so unmarshalling works
-}
-
-// EncKrbPrivPart is the encrypted part of KRB_PRIV.
-type EncKrbPrivPart struct {
-	UserData       []byte            `asn1:"explicit,tag:0"`
-	Timestamp      time.Time         `asn1:"generalized,optional,explicit,tag:1"`
-	Usec           int               `asn1:"optional,explicit,tag:2"`
-	SequenceNumber int64             `asn1:"optional,explicit,tag:3"`
-	SAddress       types.HostAddress `asn1:"explicit,tag:4"`
-	RAddress       types.HostAddress `asn1:"optional,explicit,tag:5"`
-}
-
-// NewKRBPriv returns a new KRBPriv type.
-func NewKRBPriv(part EncKrbPrivPart) KRBPriv {
-	return KRBPriv{
-		PVNO:             iana.PVNO,
-		MsgType:          msgtype.KRB_PRIV,
-		DecryptedEncPart: part,
-	}
-}
-
-// Unmarshal bytes b into the KRBPriv struct.
-func (k *KRBPriv) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBPriv))
-	if err != nil {
-		return processUnmarshalReplyError(b, err)
-	}
-	expectedMsgType := msgtype.KRB_PRIV
-	if k.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_PRIV. Expected: %v; Actual: %v", expectedMsgType, k.MsgType)
-	}
-	return nil
-}
-
-// Unmarshal bytes b into the EncKrbPrivPart struct.
-func (k *EncKrbPrivPart) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbPrivPart))
-	if err != nil {
-		return krberror.Errorf(err, krberror.EncodingError, "KRB_PRIV unmarshal error")
-	}
-	return nil
-}
-
-// Marshal the KRBPriv.
-func (k *KRBPriv) Marshal() ([]byte, error) {
-	tk := KRBPriv{
-		PVNO:    k.PVNO,
-		MsgType: k.MsgType,
-		EncPart: k.EncPart,
-	}
-	b, err := asn1.Marshal(tk)
-	if err != nil {
-		return []byte{}, err
-	}
-	b = asn1tools.AddASNAppTag(b, asnAppTag.KRBPriv)
-	return b, nil
-}
-
-// EncryptEncPart encrypts the DecryptedEncPart within the KRBPriv.
-// Use to prepare for marshaling.
-func (k *KRBPriv) EncryptEncPart(key types.EncryptionKey) error {
-	b, err := asn1.Marshal(k.DecryptedEncPart)
-	if err != nil {
-		return err
-	}
-	b = asn1tools.AddASNAppTag(b, asnAppTag.EncKrbPrivPart)
-	k.EncPart, err = crypto.GetEncryptedData(b, key, keyusage.KRB_PRIV_ENCPART, 1)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// DecryptEncPart decrypts the encrypted part of the KRBPriv message.
-func (k *KRBPriv) DecryptEncPart(key types.EncryptionKey) error {
-	b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_PRIV_ENCPART)
-	if err != nil {
-		return fmt.Errorf("error decrypting KRBPriv EncPart: %v", err)
-	}
-	err = k.DecryptedEncPart.Unmarshal(b)
-	if err != nil {
-		return fmt.Errorf("error unmarshaling encrypted part: %v", err)
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go
deleted file mode 100644
index 9c5acc1..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/KRBSafe.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package messages
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/msgtype"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-/*
-KRB-SAFE        ::= [APPLICATION 20] SEQUENCE {
-	pvno            [0] INTEGER (5),
-	msg-type        [1] INTEGER (20),
-	safe-body       [2] KRB-SAFE-BODY,
-	cksum           [3] Checksum
-}
-
-KRB-SAFE-BODY   ::= SEQUENCE {
-	user-data       [0] OCTET STRING,
-	timestamp       [1] KerberosTime OPTIONAL,
-	usec            [2] Microseconds OPTIONAL,
-	seq-number      [3] UInt32 OPTIONAL,
-	s-address       [4] HostAddress,
-	r-address       [5] HostAddress OPTIONAL
-}
-*/
-
-// KRBSafe implements RFC 4120 KRB_SAFE: https://tools.ietf.org/html/rfc4120#section-5.6.1.
-type KRBSafe struct {
-	PVNO     int            `asn1:"explicit,tag:0"`
-	MsgType  int            `asn1:"explicit,tag:1"`
-	SafeBody KRBSafeBody    `asn1:"explicit,tag:2"`
-	Cksum    types.Checksum `asn1:"explicit,tag:3"`
-}
-
-// KRBSafeBody implements the KRB_SAFE_BODY of KRB_SAFE.
-type KRBSafeBody struct {
-	UserData       []byte            `asn1:"explicit,tag:0"`
-	Timestamp      time.Time         `asn1:"generalized,optional,explicit,tag:1"`
-	Usec           int               `asn1:"optional,explicit,tag:2"`
-	SequenceNumber int64             `asn1:"optional,explicit,tag:3"`
-	SAddress       types.HostAddress `asn1:"explicit,tag:4"`
-	RAddress       types.HostAddress `asn1:"optional,explicit,tag:5"`
-}
-
-// Unmarshal bytes b into the KRBSafe struct.
-func (s *KRBSafe) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, s, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBSafe))
-	if err != nil {
-		return processUnmarshalReplyError(b, err)
-	}
-	expectedMsgType := msgtype.KRB_SAFE
-	if s.MsgType != expectedMsgType {
-		return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_SAFE. Expected: %v; Actual: %v", expectedMsgType, s.MsgType)
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go
deleted file mode 100644
index 49664b8..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/messages/Ticket.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package messages
-
-import (
-	"crypto/rand"
-	"fmt"
-	"log"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/adtype"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/errorcode"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/flags"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/keytab"
-	"gopkg.in/jcmturner/gokrb5.v7/krberror"
-	"gopkg.in/jcmturner/gokrb5.v7/pac"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-)
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.3
-
-// Ticket implements the Kerberos ticket.
-type Ticket struct {
-	TktVNO           int                 `asn1:"explicit,tag:0"`
-	Realm            string              `asn1:"generalstring,explicit,tag:1"`
-	SName            types.PrincipalName `asn1:"explicit,tag:2"`
-	EncPart          types.EncryptedData `asn1:"explicit,tag:3"`
-	DecryptedEncPart EncTicketPart       `asn1:"optional"` // Not part of ASN1 bytes so marked as optional so unmarshalling works
-}
-
-// EncTicketPart is the encrypted part of the Ticket.
-type EncTicketPart struct {
-	Flags             asn1.BitString          `asn1:"explicit,tag:0"`
-	Key               types.EncryptionKey     `asn1:"explicit,tag:1"`
-	CRealm            string                  `asn1:"generalstring,explicit,tag:2"`
-	CName             types.PrincipalName     `asn1:"explicit,tag:3"`
-	Transited         TransitedEncoding       `asn1:"explicit,tag:4"`
-	AuthTime          time.Time               `asn1:"generalized,explicit,tag:5"`
-	StartTime         time.Time               `asn1:"generalized,explicit,optional,tag:6"`
-	EndTime           time.Time               `asn1:"generalized,explicit,tag:7"`
-	RenewTill         time.Time               `asn1:"generalized,explicit,optional,tag:8"`
-	CAddr             types.HostAddresses     `asn1:"explicit,optional,tag:9"`
-	AuthorizationData types.AuthorizationData `asn1:"explicit,optional,tag:10"`
-}
-
-// TransitedEncoding part of the ticket's encrypted part.
-type TransitedEncoding struct {
-	TRType   int32  `asn1:"explicit,tag:0"`
-	Contents []byte `asn1:"explicit,tag:1"`
-}
-
-// NewTicket creates a new Ticket instance.
-func NewTicket(cname types.PrincipalName, crealm string, sname types.PrincipalName, srealm string, flags asn1.BitString, sktab *keytab.Keytab, eTypeID int32, kvno int, authTime, startTime, endTime, renewTill time.Time) (Ticket, types.EncryptionKey, error) {
-	etype, err := crypto.GetEtype(eTypeID)
-	if err != nil {
-		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting etype for new ticket")
-	}
-	ks := etype.GetKeyByteSize()
-	kv := make([]byte, ks, ks)
-	rand.Read(kv)
-	sessionKey := types.EncryptionKey{
-		KeyType:  eTypeID,
-		KeyValue: kv,
-	}
-	etp := EncTicketPart{
-		Flags:     flags,
-		Key:       sessionKey,
-		CRealm:    crealm,
-		CName:     cname,
-		Transited: TransitedEncoding{},
-		AuthTime:  authTime,
-		StartTime: startTime,
-		EndTime:   endTime,
-		RenewTill: renewTill,
-	}
-	b, err := asn1.Marshal(etp)
-	if err != nil {
-		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncodingError, "error marshalling ticket encpart")
-	}
-	b = asn1tools.AddASNAppTag(b, asnAppTag.EncTicketPart)
-	skey, err := sktab.GetEncryptionKey(sname, srealm, kvno, eTypeID)
-	if err != nil {
-		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting encryption key for new ticket")
-	}
-	ed, err := crypto.GetEncryptedData(b, skey, keyusage.KDC_REP_TICKET, kvno)
-	if err != nil {
-		return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error encrypting ticket encpart")
-	}
-	tkt := Ticket{
-		TktVNO:  iana.PVNO,
-		Realm:   srealm,
-		SName:   sname,
-		EncPart: ed,
-	}
-	return tkt, sessionKey, nil
-}
-
-// Unmarshal bytes b into a Ticket struct.
-func (t *Ticket) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket))
-	return err
-}
-
-// Marshal the Ticket.
-func (t *Ticket) Marshal() ([]byte, error) {
-	b, err := asn1.Marshal(*t)
-	if err != nil {
-		return nil, err
-	}
-	b = asn1tools.AddASNAppTag(b, asnAppTag.Ticket)
-	return b, nil
-}
-
-// Unmarshal bytes b into the EncTicketPart struct.
-func (t *EncTicketPart) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.EncTicketPart))
-	return err
-}
-
-// unmarshalTicket returns a ticket from the bytes provided.
-func unmarshalTicket(b []byte) (t Ticket, err error) {
-	err = t.Unmarshal(b)
-	return
-}
-
-// UnmarshalTicketsSequence returns a slice of Tickets from a raw ASN1 value.
-func unmarshalTicketsSequence(in asn1.RawValue) ([]Ticket, error) {
-	//This is a workaround to a asn1 decoding issue in golang - https://github.com/golang/go/issues/17321. It's not pretty I'm afraid
-	//We pull out raw values from the larger raw value (that is actually the data of the sequence of raw values) and track our position moving along the data.
-	b := in.Bytes
-	// Ignore the head of the asn1 stream (1 byte for tag and those for the length) as this is what tells us its a sequence but we're handling it ourselves
-	p := 1 + asn1tools.GetNumberBytesInLengthHeader(in.Bytes)
-	var tkts []Ticket
-	var raw asn1.RawValue
-	for p < (len(b)) {
-		_, err := asn1.UnmarshalWithParams(b[p:], &raw, fmt.Sprintf("application,tag:%d", asnAppTag.Ticket))
-		if err != nil {
-			return nil, fmt.Errorf("unmarshaling sequence of tickets failed getting length of ticket: %v", err)
-		}
-		t, err := unmarshalTicket(b[p:])
-		if err != nil {
-			return nil, fmt.Errorf("unmarshaling sequence of tickets failed: %v", err)
-		}
-		p += len(raw.FullBytes)
-		tkts = append(tkts, t)
-	}
-	MarshalTicketSequence(tkts)
-	return tkts, nil
-}
-
-// MarshalTicketSequence marshals a slice of Tickets returning an ASN1 raw value containing the ticket sequence.
-func MarshalTicketSequence(tkts []Ticket) (asn1.RawValue, error) {
-	raw := asn1.RawValue{
-		Class:      2,
-		IsCompound: true,
-	}
-	if len(tkts) < 1 {
-		// There are no tickets to marshal
-		return raw, nil
-	}
-	var btkts []byte
-	for i, t := range tkts {
-		b, err := t.Marshal()
-		if err != nil {
-			return raw, fmt.Errorf("error marshaling ticket number %d in sequence of tickets", i+1)
-		}
-		btkts = append(btkts, b...)
-	}
-	// The ASN1 wrapping consists of 2 bytes:
-	// 1st byte -> Identifier Octet - In this case an OCTET STRING (ASN TAG
-	// 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here.
-	// Application Tag:
-	//| Byte:       | 8                            | 7                          | 6                                         | 5 | 4 | 3 | 2 | 1             |
-	//| Value:      | 0                            | 1                          | 1                                         | From the RFC spec 4120        |
-	//| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value |
-	btkts = append(asn1tools.MarshalLengthBytes(len(btkts)), btkts...)
-	btkts = append([]byte{byte(32 + asn1.TagSequence)}, btkts...)
-	raw.Bytes = btkts
-	// If we need to create the full bytes then identifier octet is "context-specific" = 128 + "constructed" + 32 + the wrapping explicit tag (11)
-	//fmt.Fprintf(os.Stderr, "mRaw fb: %v\n", raw.FullBytes)
-	return raw, nil
-}
-
-// DecryptEncPart decrypts the encrypted part of the ticket.
-// The sname argument can be used to specify which service principal's key should be used to decrypt the ticket.
-// If nil is passed as the sname then the service principal specified within the ticket it used.
-func (t *Ticket) DecryptEncPart(keytab *keytab.Keytab, sname *types.PrincipalName) error {
-	if sname == nil {
-		sname = &t.SName
-	}
-	key, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType)
-	if err != nil {
-		return NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err))
-	}
-	return t.Decrypt(key)
-}
-
-// Decrypt decrypts the encrypted part of the ticket using the key provided.
-func (t *Ticket) Decrypt(key types.EncryptionKey) error {
-	b, err := crypto.DecryptEncPart(t.EncPart, key, keyusage.KDC_REP_TICKET)
-	if err != nil {
-		return fmt.Errorf("error decrypting Ticket EncPart: %v", err)
-	}
-	var denc EncTicketPart
-	err = denc.Unmarshal(b)
-	if err != nil {
-		return fmt.Errorf("error unmarshaling encrypted part: %v", err)
-	}
-	t.DecryptedEncPart = denc
-	return nil
-}
-
-// GetPACType returns a Microsoft PAC that has been extracted from the ticket and processed.
-func (t *Ticket) GetPACType(keytab *keytab.Keytab, sname *types.PrincipalName, l *log.Logger) (bool, pac.PACType, error) {
-	var isPAC bool
-	for _, ad := range t.DecryptedEncPart.AuthorizationData {
-		if ad.ADType == adtype.ADIfRelevant {
-			var ad2 types.AuthorizationData
-			err := ad2.Unmarshal(ad.ADData)
-			if err != nil {
-				l.Printf("PAC authorization data could not be unmarshaled: %v", err)
-				continue
-			}
-			if ad2[0].ADType == adtype.ADWin2KPAC {
-				isPAC = true
-				var p pac.PACType
-				err = p.Unmarshal(ad2[0].ADData)
-				if err != nil {
-					return isPAC, p, fmt.Errorf("error unmarshaling PAC: %v", err)
-				}
-				if sname == nil {
-					sname = &t.SName
-				}
-				key, err := keytab.GetEncryptionKey(*sname, t.Realm, t.EncPart.KVNO, t.EncPart.EType)
-				if err != nil {
-					return isPAC, p, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err))
-				}
-				err = p.ProcessPACInfoBuffers(key, l)
-				return isPAC, p, err
-			}
-		}
-	}
-	return isPAC, pac.PACType{}, nil
-}
-
-// Valid checks it the ticket is currently valid. Max duration passed endtime passed in as argument.
-func (t *Ticket) Valid(d time.Duration) (bool, error) {
-	// Check for future tickets or invalid tickets
-	time := time.Now().UTC()
-	if t.DecryptedEncPart.StartTime.Sub(time) > d || types.IsFlagSet(&t.DecryptedEncPart.Flags, flags.Invalid) {
-		return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_NYV, "service ticket provided is not yet valid")
-	}
-
-	// Check for expired ticket
-	if time.Sub(t.DecryptedEncPart.EndTime) > d {
-		return false, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_TKT_EXPIRED, "service ticket provided has expired")
-	}
-
-	return true, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go
deleted file mode 100644
index 612979e..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_claims.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"fmt"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx
-
-// ClientClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh536365.aspx
-type ClientClaimsInfo struct {
-	ClaimsSetMetadata mstypes.ClaimsSetMetadata
-	ClaimsSet         mstypes.ClaimsSet
-}
-
-// Unmarshal bytes into the ClientClaimsInfo struct
-func (k *ClientClaimsInfo) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	m := new(mstypes.ClaimsSetMetadata)
-	err = dec.Decode(m)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err)
-	}
-	k.ClaimsSetMetadata = *m
-	k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet()
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err)
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go
deleted file mode 100644
index ad5212d..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/client_info.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package pac
-
-import (
-	"bytes"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-)
-
-// ClientInfo implements https://msdn.microsoft.com/en-us/library/cc237951.aspx
-type ClientInfo struct {
-	ClientID   mstypes.FileTime // A FILETIME structure in little-endian format that contains the Kerberos initial ticket-granting ticket TGT authentication time
-	NameLength uint16           // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the Name field.
-	Name       string           // An array of 16-bit Unicode characters in little-endian format that contains the client's account name.
-}
-
-// Unmarshal bytes into the ClientInfo struct
-func (k *ClientInfo) Unmarshal(b []byte) (err error) {
-	//The PAC_CLIENT_INFO structure is a simple structure that is not NDR-encoded.
-	r := mstypes.NewReader(bytes.NewReader(b))
-
-	k.ClientID, err = r.FileTime()
-	if err != nil {
-		return
-	}
-	k.NameLength, err = r.Uint16()
-	if err != nil {
-		return
-	}
-	k.Name, err = r.UTF16String(int(k.NameLength))
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go
deleted file mode 100644
index a8c2c3c..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/credentials_info.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// https://msdn.microsoft.com/en-us/library/cc237931.aspx
-
-// CredentialsInfo implements https://msdn.microsoft.com/en-us/library/cc237953.aspx
-type CredentialsInfo struct {
-	Version                    uint32 // A 32-bit unsigned integer in little-endian format that defines the version. MUST be 0x00000000.
-	EType                      uint32
-	PACCredentialDataEncrypted []byte // Key usage number for encryption: KERB_NON_KERB_SALT (16)
-	PACCredentialData          CredentialData
-}
-
-// Unmarshal bytes into the CredentialsInfo struct
-func (c *CredentialsInfo) Unmarshal(b []byte, k types.EncryptionKey) (err error) {
-	//The CredentialsInfo structure is a simple structure that is not NDR-encoded.
-	r := mstypes.NewReader(bytes.NewReader(b))
-
-	c.Version, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	if c.Version != 0 {
-		err = errors.New("credentials info version is not zero")
-		return
-	}
-	c.EType, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	c.PACCredentialDataEncrypted, err = r.ReadBytes(len(b) - 8)
-
-	err = c.DecryptEncPart(k)
-	if err != nil {
-		err = fmt.Errorf("error decrypting PAC Credentials Data: %v", err)
-		return
-	}
-	return
-}
-
-// DecryptEncPart decrypts the encrypted part of the CredentialsInfo.
-func (c *CredentialsInfo) DecryptEncPart(k types.EncryptionKey) error {
-	if k.KeyType != int32(c.EType) {
-		return fmt.Errorf("key provided is not the correct type. Type needed: %d, type provided: %d", c.EType, k.KeyType)
-	}
-	pt, err := crypto.DecryptMessage(c.PACCredentialDataEncrypted, k, keyusage.KERB_NON_KERB_SALT)
-	if err != nil {
-		return err
-	}
-	err = c.PACCredentialData.Unmarshal(pt)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// CredentialData implements https://msdn.microsoft.com/en-us/library/cc237952.aspx
-// This structure is encrypted prior to being encoded in any other structures.
-// Encryption is performed by first serializing the data structure via Network Data Representation (NDR) encoding, as specified in [MS-RPCE].
-// Once serialized, the data is encrypted using the key and cryptographic system selected through the AS protocol and the KRB_AS_REP message
-// Fields (for capturing this information) and cryptographic parameters are specified in PAC_CREDENTIAL_INFO (section 2.6.1).
-type CredentialData struct {
-	CredentialCount uint32
-	Credentials     []SECPKGSupplementalCred // Size is the value of CredentialCount
-}
-
-// Unmarshal converts the bytes provided into a CredentialData type.
-func (c *CredentialData) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	err = dec.Decode(c)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err)
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go
deleted file mode 100644
index c2299bb..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_claims.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"fmt"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx
-
-// DeviceClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh554226.aspx
-type DeviceClaimsInfo struct {
-	ClaimsSetMetadata mstypes.ClaimsSetMetadata
-	ClaimsSet         mstypes.ClaimsSet
-}
-
-// Unmarshal bytes into the ClientClaimsInfo struct
-func (k *DeviceClaimsInfo) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	m := new(mstypes.ClaimsSetMetadata)
-	err = dec.Decode(m)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSetMetadata: %v", err)
-	}
-	k.ClaimsSetMetadata = *m
-	k.ClaimsSet, err = k.ClaimsSetMetadata.ClaimsSet()
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling ClientClaimsInfo ClaimsSet: %v", err)
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go
deleted file mode 100644
index 51be207..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/device_info.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"fmt"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// DeviceInfo implements https://msdn.microsoft.com/en-us/library/hh536402.aspx
-type DeviceInfo struct {
-	UserID            uint32                          // A 32-bit unsigned integer that contains the RID of the account. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account.
-	PrimaryGroupID    uint32                          // A 32-bit unsigned integer that contains the RID for the primary group to which this account belongs.
-	AccountDomainID   mstypes.RPCSID                  `ndr:"pointer"` // A SID structure that contains the SID for the domain of the account.This member is used in conjunction with the UserId, and GroupIds members to create the user and group SIDs for the client.
-	AccountGroupCount uint32                          // A 32-bit unsigned integer that contains the number of groups within the account domain to which the account belongs
-	AccountGroupIDs   []mstypes.GroupMembership       `ndr:"pointer,conformant"` // A pointer to a list of GROUP_MEMBERSHIP (section 2.2.2) structures that contains the groups to which the account belongs in the account domain. The number of groups in this list MUST be equal to GroupCount.
-	SIDCount          uint32                          // A 32-bit unsigned integer that contains the total number of SIDs present in the ExtraSids member.
-	ExtraSIDs         []mstypes.KerbSidAndAttributes  `ndr:"pointer,conformant"` // A pointer to a list of KERB_SID_AND_ATTRIBUTES structures that contain a list of SIDs corresponding to groups not in domains. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account.
-	DomainGroupCount  uint32                          // A 32-bit unsigned integer that contains the number of domains with groups to which the account belongs.
-	DomainGroup       []mstypes.DomainGroupMembership `ndr:"pointer,conformant"` // A pointer to a list of DOMAIN_GROUP_MEMBERSHIP structures (section 2.2.3) that contains the domains to which the account belongs to a group. The number of sets in this list MUST be equal to DomainCount.
-}
-
-// Unmarshal bytes into the DeviceInfo struct
-func (k *DeviceInfo) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	err = dec.Decode(k)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling DeviceInfo: %v", err)
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go
deleted file mode 100644
index 9dd69d2..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/kerb_validation_info.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Package pac implements Microsoft Privilege Attribute Certificate (PAC) processing.
-package pac
-
-import (
-	"bytes"
-	"fmt"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// KERB_VALIDATION_INFO flags.
-const (
-	USERFLAG_GUEST                                    = 31 // Authentication was done via the GUEST account; no password was used.
-	USERFLAG_NO_ENCRYPTION_AVAILABLE                  = 30 // No encryption is available.
-	USERFLAG_LAN_MANAGER_KEY                          = 28 // LAN Manager key was used for authentication.
-	USERFLAG_SUB_AUTH                                 = 25 // Sub-authentication used; session key came from the sub-authentication package.
-	USERFLAG_EXTRA_SIDS                               = 26 // Indicates that the ExtraSids field is populated and contains additional SIDs.
-	USERFLAG_MACHINE_ACCOUNT                          = 24 // Indicates that the account is a machine account.
-	USERFLAG_DC_NTLM2                                 = 23 // Indicates that the domain controller understands NTLMv2.
-	USERFLAG_RESOURCE_GROUPIDS                        = 22 // Indicates that the ResourceGroupIds field is populated.
-	USERFLAG_PROFILEPATH                              = 21 // Indicates that ProfilePath is populated.
-	USERFLAG_NTLM2_NTCHALLENGERESP                    = 20 // The NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation.
-	USERFLAG_LM2_LMCHALLENGERESP                      = 19 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation.
-	USERFLAG_AUTH_LMCHALLENGERESP_KEY_NTCHALLENGERESP = 18 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and the NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used session key generation.
-)
-
-// KerbValidationInfo implement https://msdn.microsoft.com/en-us/library/cc237948.aspx
-// The KERB_VALIDATION_INFO structure defines the user's logon and authorization information
-// provided by the DC. The KERB_VALIDATION_INFO structure is a subset of the
-// NETLOGON_VALIDATION_SAM_INFO4 structure ([MS-NRPC] section 2.2.1.4.13).
-// It is a subset due to historical reasons and to the use of the common Active Directory to generate this information.
-// The KERB_VALIDATION_INFO structure is marshaled by RPC [MS-RPCE].
-type KerbValidationInfo struct {
-	LogOnTime              mstypes.FileTime
-	LogOffTime             mstypes.FileTime
-	KickOffTime            mstypes.FileTime
-	PasswordLastSet        mstypes.FileTime
-	PasswordCanChange      mstypes.FileTime
-	PasswordMustChange     mstypes.FileTime
-	EffectiveName          mstypes.RPCUnicodeString
-	FullName               mstypes.RPCUnicodeString
-	LogonScript            mstypes.RPCUnicodeString
-	ProfilePath            mstypes.RPCUnicodeString
-	HomeDirectory          mstypes.RPCUnicodeString
-	HomeDirectoryDrive     mstypes.RPCUnicodeString
-	LogonCount             uint16
-	BadPasswordCount       uint16
-	UserID                 uint32
-	PrimaryGroupID         uint32
-	GroupCount             uint32
-	GroupIDs               []mstypes.GroupMembership `ndr:"pointer,conformant"`
-	UserFlags              uint32
-	UserSessionKey         mstypes.UserSessionKey
-	LogonServer            mstypes.RPCUnicodeString
-	LogonDomainName        mstypes.RPCUnicodeString
-	LogonDomainID          mstypes.RPCSID `ndr:"pointer"`
-	Reserved1              [2]uint32      // Has 2 elements
-	UserAccountControl     uint32
-	SubAuthStatus          uint32
-	LastSuccessfulILogon   mstypes.FileTime
-	LastFailedILogon       mstypes.FileTime
-	FailedILogonCount      uint32
-	Reserved3              uint32
-	SIDCount               uint32
-	ExtraSIDs              []mstypes.KerbSidAndAttributes `ndr:"pointer,conformant"`
-	ResourceGroupDomainSID mstypes.RPCSID                 `ndr:"pointer"`
-	ResourceGroupCount     uint32
-	ResourceGroupIDs       []mstypes.GroupMembership `ndr:"pointer,conformant"`
-}
-
-// Unmarshal bytes into the DeviceInfo struct
-func (k *KerbValidationInfo) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	err = dec.Decode(k)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling KerbValidationInfo: %v", err)
-	}
-	return
-}
-
-// GetGroupMembershipSIDs returns a slice of strings containing the group membership SIDs found in the PAC.
-func (k *KerbValidationInfo) GetGroupMembershipSIDs() []string {
-	var g []string
-	lSID := k.LogonDomainID.String()
-	for i := range k.GroupIDs {
-		g = append(g, fmt.Sprintf("%s-%d", lSID, k.GroupIDs[i].RelativeID))
-	}
-	for _, s := range k.ExtraSIDs {
-		var exists = false
-		for _, es := range g {
-			if es == s.SID.String() {
-				exists = true
-				break
-			}
-		}
-		if !exists {
-			g = append(g, s.SID.String())
-		}
-	}
-	for _, r := range k.ResourceGroupIDs {
-		var exists = false
-		s := fmt.Sprintf("%s-%d", k.ResourceGroupDomainSID.String(), r.RelativeID)
-		for _, es := range g {
-			if es == s {
-				exists = true
-				break
-			}
-		}
-		if !exists {
-			g = append(g, s)
-		}
-	}
-	return g
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go
deleted file mode 100644
index c73fd06..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/pac_type.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"log"
-
-	"gopkg.in/jcmturner/gokrb5.v7/crypto"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/keyusage"
-	"gopkg.in/jcmturner/gokrb5.v7/types"
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-)
-
-const (
-	infoTypeKerbValidationInfo     uint32 = 1
-	infoTypeCredentials            uint32 = 2
-	infoTypePACServerSignatureData uint32 = 6
-	infoTypePACKDCSignatureData    uint32 = 7
-	infoTypePACClientInfo          uint32 = 10
-	infoTypeS4UDelegationInfo      uint32 = 11
-	infoTypeUPNDNSInfo             uint32 = 12
-	infoTypePACClientClaimsInfo    uint32 = 13
-	infoTypePACDeviceInfo          uint32 = 14
-	infoTypePACDeviceClaimsInfo    uint32 = 15
-)
-
-// PACType implements: https://msdn.microsoft.com/en-us/library/cc237950.aspx
-type PACType struct {
-	CBuffers           uint32
-	Version            uint32
-	Buffers            []InfoBuffer
-	Data               []byte
-	KerbValidationInfo *KerbValidationInfo
-	CredentialsInfo    *CredentialsInfo
-	ServerChecksum     *SignatureData
-	KDCChecksum        *SignatureData
-	ClientInfo         *ClientInfo
-	S4UDelegationInfo  *S4UDelegationInfo
-	UPNDNSInfo         *UPNDNSInfo
-	ClientClaimsInfo   *ClientClaimsInfo
-	DeviceInfo         *DeviceInfo
-	DeviceClaimsInfo   *DeviceClaimsInfo
-	ZeroSigData        []byte
-}
-
-// InfoBuffer implements the PAC Info Buffer: https://msdn.microsoft.com/en-us/library/cc237954.aspx
-type InfoBuffer struct {
-	ULType       uint32 // A 32-bit unsigned integer in little-endian format that describes the type of data present in the buffer contained at Offset.
-	CBBufferSize uint32 // A 32-bit unsigned integer in little-endian format that contains the size, in bytes, of the buffer in the PAC located at Offset.
-	Offset       uint64 // A 64-bit unsigned integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the PACTYPE structure. The data offset MUST be a multiple of eight. The following sections specify the format of each type of element.
-}
-
-// Unmarshal bytes into the PACType struct
-func (pac *PACType) Unmarshal(b []byte) (err error) {
-	pac.Data = b
-	zb := make([]byte, len(b), len(b))
-	copy(zb, b)
-	pac.ZeroSigData = zb
-	r := mstypes.NewReader(bytes.NewReader(b))
-	pac.CBuffers, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	pac.Version, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	buf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers)
-	for i := range buf {
-		buf[i].ULType, err = r.Uint32()
-		if err != nil {
-			return
-		}
-		buf[i].CBBufferSize, err = r.Uint32()
-		if err != nil {
-			return
-		}
-		buf[i].Offset, err = r.Uint64()
-		if err != nil {
-			return
-		}
-	}
-	pac.Buffers = buf
-	return nil
-}
-
-// ProcessPACInfoBuffers processes the PAC Info Buffers.
-// https://msdn.microsoft.com/en-us/library/cc237954.aspx
-func (pac *PACType) ProcessPACInfoBuffers(key types.EncryptionKey, l *log.Logger) error {
-	for _, buf := range pac.Buffers {
-		p := make([]byte, buf.CBBufferSize, buf.CBBufferSize)
-		copy(p, pac.Data[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)])
-		switch buf.ULType {
-		case infoTypeKerbValidationInfo:
-			if pac.KerbValidationInfo != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k KerbValidationInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				return fmt.Errorf("error processing KerbValidationInfo: %v", err)
-			}
-			pac.KerbValidationInfo = &k
-		case infoTypeCredentials:
-			// Currently PAC parsing is only useful on the service side in gokrb5
-			// The CredentialsInfo are only useful when gokrb5 has implemented RFC4556 and only applied on the client side.
-			// Skipping CredentialsInfo - will be revisited under RFC4556 implementation.
-			continue
-			//if pac.CredentialsInfo != nil {
-			//	//Must ignore subsequent buffers of this type
-			//	continue
-			//}
-			//var k CredentialsInfo
-			//err := k.Unmarshal(p, key) // The encryption key used is the AS reply key only available to the client.
-			//if err != nil {
-			//	return fmt.Errorf("error processing CredentialsInfo: %v", err)
-			//}
-			//pac.CredentialsInfo = &k
-		case infoTypePACServerSignatureData:
-			if pac.ServerChecksum != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k SignatureData
-			zb, err := k.Unmarshal(p)
-			copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb)
-			if err != nil {
-				return fmt.Errorf("error processing ServerChecksum: %v", err)
-			}
-			pac.ServerChecksum = &k
-		case infoTypePACKDCSignatureData:
-			if pac.KDCChecksum != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k SignatureData
-			zb, err := k.Unmarshal(p)
-			copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb)
-			if err != nil {
-				return fmt.Errorf("error processing KDCChecksum: %v", err)
-			}
-			pac.KDCChecksum = &k
-		case infoTypePACClientInfo:
-			if pac.ClientInfo != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k ClientInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				return fmt.Errorf("error processing ClientInfo: %v", err)
-			}
-			pac.ClientInfo = &k
-		case infoTypeS4UDelegationInfo:
-			if pac.S4UDelegationInfo != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k S4UDelegationInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				l.Printf("could not process S4U_DelegationInfo: %v", err)
-				continue
-			}
-			pac.S4UDelegationInfo = &k
-		case infoTypeUPNDNSInfo:
-			if pac.UPNDNSInfo != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k UPNDNSInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				l.Printf("could not process UPN_DNSInfo: %v", err)
-				continue
-			}
-			pac.UPNDNSInfo = &k
-		case infoTypePACClientClaimsInfo:
-			if pac.ClientClaimsInfo != nil || len(p) < 1 {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k ClientClaimsInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				l.Printf("could not process ClientClaimsInfo: %v", err)
-				continue
-			}
-			pac.ClientClaimsInfo = &k
-		case infoTypePACDeviceInfo:
-			if pac.DeviceInfo != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k DeviceInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				l.Printf("could not process DeviceInfo: %v", err)
-				continue
-			}
-			pac.DeviceInfo = &k
-		case infoTypePACDeviceClaimsInfo:
-			if pac.DeviceClaimsInfo != nil {
-				//Must ignore subsequent buffers of this type
-				continue
-			}
-			var k DeviceClaimsInfo
-			err := k.Unmarshal(p)
-			if err != nil {
-				l.Printf("could not process DeviceClaimsInfo: %v", err)
-				continue
-			}
-			pac.DeviceClaimsInfo = &k
-		}
-	}
-
-	if ok, err := pac.verify(key); !ok {
-		return err
-	}
-
-	return nil
-}
-
-func (pac *PACType) verify(key types.EncryptionKey) (bool, error) {
-	if pac.KerbValidationInfo == nil {
-		return false, errors.New("PAC Info Buffers does not contain a KerbValidationInfo")
-	}
-	if pac.ServerChecksum == nil {
-		return false, errors.New("PAC Info Buffers does not contain a ServerChecksum")
-	}
-	if pac.KDCChecksum == nil {
-		return false, errors.New("PAC Info Buffers does not contain a KDCChecksum")
-	}
-	if pac.ClientInfo == nil {
-		return false, errors.New("PAC Info Buffers does not contain a ClientInfo")
-	}
-	etype, err := crypto.GetChksumEtype(int32(pac.ServerChecksum.SignatureType))
-	if err != nil {
-		return false, err
-	}
-	if ok := etype.VerifyChecksum(key.KeyValue,
-		pac.ZeroSigData,
-		pac.ServerChecksum.Signature,
-		keyusage.KERB_NON_KERB_CKSUM_SALT); !ok {
-		return false, errors.New("PAC service checksum verification failed")
-	}
-
-	return true, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go
deleted file mode 100644
index 614ee85..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/s4u_delegation_info.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"fmt"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// S4UDelegationInfo implements https://msdn.microsoft.com/en-us/library/cc237944.aspx
-type S4UDelegationInfo struct {
-	S4U2proxyTarget      mstypes.RPCUnicodeString // The name of the principal to whom the application can forward the ticket.
-	TransitedListSize    uint32
-	S4UTransitedServices []mstypes.RPCUnicodeString `ndr:"pointer,conformant"` // List of all services that have been delegated through by this client and subsequent services or servers.. Size is value of TransitedListSize
-}
-
-// Unmarshal bytes into the S4UDelegationInfo struct
-func (k *S4UDelegationInfo) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	err = dec.Decode(k)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling S4UDelegationInfo: %v", err)
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go
deleted file mode 100644
index 7e0fce1..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/signature_data.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package pac
-
-import (
-	"bytes"
-
-	"gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype"
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-)
-
-/*
-https://msdn.microsoft.com/en-us/library/cc237955.aspx
-
-The Key Usage Value MUST be KERB_NON_KERB_CKSUM_SALT (17) [MS-KILE] (section 3.1.5.9).
-
-Server Signature (SignatureType = 0x00000006)
-https://msdn.microsoft.com/en-us/library/cc237957.aspx
-The KDC will use the long-term key that the KDC shares with the server, so that the server can verify this signature on receiving a PAC.
-The server signature is a keyed hash [RFC4757] of the entire PAC message, with the Signature fields of both PAC_SIGNATURE_DATA structures set to zero.
-The key used to protect the ciphertext part of the response is used.
-The checksum type corresponds to the key unless the key is DES, in which case the KERB_CHECKSUM_HMAC_MD5 key is used.
-The resulting hash value is then placed in the Signature field of the server's PAC_SIGNATURE_DATA structure.
-
-KDC Signature (SignatureType = 0x00000007)
-https://msdn.microsoft.com/en-us/library/dd357117.aspx
-The KDC will use KDC (krbtgt) key [RFC4120], so that other KDCs can verify this signature on receiving a PAC.
-The KDC signature is a keyed hash [RFC4757] of the Server Signature field in the PAC message.
-The cryptographic system that is used to calculate the checksum depends on which system the KDC supports, as defined below:
-- Supports RC4-HMAC --> KERB_CHECKSUM_HMAC_MD5
-- Does not support RC4-HMAC and supports AES256 --> HMAC_SHA1_96_AES256
-- Does not support RC4-HMAC or AES256-CTS-HMAC-SHA1-96, and supports AES128-CTS-HMAC-SHA1-96 --> HMAC_SHA1_96_AES128
-- Does not support RC4-HMAC, AES128-CTS-HMAC-SHA1-96 or AES256-CTS-HMAC-SHA1-96 -->  None. The checksum operation will fail.
-*/
-
-// SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx
-type SignatureData struct {
-	SignatureType  uint32 // A 32-bit unsigned integer value in little-endian format that defines the cryptographic system used to calculate the checksum. This MUST be one of the following checksum types: KERB_CHECKSUM_HMAC_MD5 (signature size = 16), HMAC_SHA1_96_AES128 (signature size = 12), HMAC_SHA1_96_AES256 (signature size = 12).
-	Signature      []byte // Size depends on the type. See comment above.
-	RODCIdentifier uint16 // A 16-bit unsigned integer value in little-endian format that contains the first 16 bits of the key version number ([MS-KILE] section 3.1.5.8) when the KDC is an RODC. When the KDC is not an RODC, this field does not exist.
-}
-
-// Unmarshal bytes into the SignatureData struct
-func (k *SignatureData) Unmarshal(b []byte) (rb []byte, err error) {
-	r := mstypes.NewReader(bytes.NewReader(b))
-
-	k.SignatureType, err = r.Uint32()
-	if err != nil {
-		return
-	}
-
-	var c int
-	switch k.SignatureType {
-	case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED:
-		c = 16
-	case uint32(chksumtype.HMAC_SHA1_96_AES128):
-		c = 12
-	case uint32(chksumtype.HMAC_SHA1_96_AES256):
-		c = 12
-	}
-	k.Signature, err = r.ReadBytes(c)
-	if err != nil {
-		return
-	}
-
-	// When the KDC is not an Read Only Domain Controller (RODC), this field does not exist.
-	if len(b) >= 4+c+2 {
-		k.RODCIdentifier, err = r.Uint16()
-		if err != nil {
-			return
-		}
-	}
-
-	// Create bytes with zeroed signature needed for checksum verification
-	rb = make([]byte, len(b), len(b))
-	copy(rb, b)
-	z := make([]byte, len(b), len(b))
-	copy(rb[4:4+c], z)
-
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go
deleted file mode 100644
index 5f4f93c..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/supplemental_cred.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package pac
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-const (
-	// NTLMSupCredLMOWF indicates that the LM OWF member is present and valid.
-	NTLMSupCredLMOWF uint32 = 31
-	// NTLMSupCredNTOWF indicates that the NT OWF member is present and valid.
-	NTLMSupCredNTOWF uint32 = 30
-)
-
-// NTLMSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237949.aspx
-type NTLMSupplementalCred struct {
-	Version    uint32 // A 32-bit unsigned integer that defines the credential version.This field MUST be 0x00000000.
-	Flags      uint32
-	LMPassword []byte // A 16-element array of unsigned 8-bit integers that define the LM OWF. The LMPassword member MUST be ignored if the L flag is not set in the Flags member.
-	NTPassword []byte // A 16-element array of unsigned 8-bit integers that define the NT OWF. The NTPassword member MUST be ignored if the N flag is not set in the Flags member.
-}
-
-// Unmarshal converts the bytes provided into a NTLMSupplementalCred.
-func (c *NTLMSupplementalCred) Unmarshal(b []byte) (err error) {
-	r := mstypes.NewReader(bytes.NewReader(b))
-	c.Version, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	if c.Version != 0 {
-		err = errors.New("NTLMSupplementalCred version is not zero")
-		return
-	}
-	c.Flags, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	if isFlagSet(c.Flags, NTLMSupCredLMOWF) {
-		c.LMPassword, err = r.ReadBytes(16)
-		if err != nil {
-			return
-		}
-	}
-	if isFlagSet(c.Flags, NTLMSupCredNTOWF) {
-		c.NTPassword, err = r.ReadBytes(16)
-		if err != nil {
-			return
-		}
-	}
-	return
-}
-
-// isFlagSet tests if a flag is set in the uint32 little endian flag
-func isFlagSet(f uint32, i uint32) bool {
-	//Which byte?
-	b := int(i / 8)
-	//Which bit in byte
-	p := uint(7 - (int(i) - 8*b))
-	fb := make([]byte, 4)
-	binary.LittleEndian.PutUint32(fb, f)
-	if fb[b]&(1<<p) != 0 {
-		return true
-	}
-	return false
-}
-
-// SECPKGSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237956.aspx
-// The SECPKG_SUPPLEMENTAL_CRED structure defines the name of the security package that requires
-// supplemental credentials and the credential buffer for that package.
-// The SECPKG_SUPPLEMENTAL_CRED structure is marshaled by RPC.
-type SECPKGSupplementalCred struct {
-	PackageName    mstypes.RPCUnicodeString
-	CredentialSize uint32
-	Credentials    []uint8 `ndr:"pointer,conformant"` // Is a ptr. Size is the value of CredentialSize
-}
-
-// Unmarshal converts the bytes provided into a SECPKGSupplementalCred.
-func (c *SECPKGSupplementalCred) Unmarshal(b []byte) (err error) {
-	dec := ndr.NewDecoder(bytes.NewReader(b))
-	err = dec.Decode(c)
-	if err != nil {
-		err = fmt.Errorf("error unmarshaling SECPKGSupplementalCred: %v", err)
-	}
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go
deleted file mode 100644
index ee0e6bf..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/pac/upn_dns_info.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package pac
-
-import (
-	"bytes"
-
-	"gopkg.in/jcmturner/rpc.v1/mstypes"
-)
-
-// UPNDNSInfo implements https://msdn.microsoft.com/en-us/library/dd240468.aspx
-type UPNDNSInfo struct {
-	UPNLength           uint16 // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the UPN field.
-	UPNOffset           uint16 // An unsigned 16-bit integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the UPN_DNS_INFO structure.
-	DNSDomainNameLength uint16
-	DNSDomainNameOffset uint16
-	Flags               uint32
-	UPN                 string
-	DNSDomain           string
-}
-
-const (
-	upnNoUPNAttr = 31 // The user account object does not have the userPrincipalName attribute ([MS-ADA3] section 2.349) set. A UPN constructed by concatenating the user name with the DNS domain name of the account domain is provided.
-)
-
-// Unmarshal bytes into the UPN_DNSInfo struct
-func (k *UPNDNSInfo) Unmarshal(b []byte) (err error) {
-	//The UPN_DNS_INFO structure is a simple structure that is not NDR-encoded.
-	r := mstypes.NewReader(bytes.NewReader(b))
-	k.UPNLength, err = r.Uint16()
-	if err != nil {
-		return
-	}
-	k.UPNOffset, err = r.Uint16()
-	if err != nil {
-		return
-	}
-	k.DNSDomainNameLength, err = r.Uint16()
-	if err != nil {
-		return
-	}
-	k.DNSDomainNameOffset, err = r.Uint16()
-	if err != nil {
-		return
-	}
-	k.Flags, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	ub := mstypes.NewReader(bytes.NewReader(b[k.UPNOffset : k.UPNOffset+k.UPNLength]))
-	db := mstypes.NewReader(bytes.NewReader(b[k.DNSDomainNameOffset : k.DNSDomainNameOffset+k.DNSDomainNameLength]))
-
-	u := make([]rune, k.UPNLength/2, k.UPNLength/2)
-	for i := 0; i < len(u); i++ {
-		var r uint16
-		r, err = ub.Uint16()
-		if err != nil {
-			return
-		}
-		u[i] = rune(r)
-	}
-	k.UPN = string(u)
-	d := make([]rune, k.DNSDomainNameLength/2, k.DNSDomainNameLength/2)
-	for i := 0; i < len(d); i++ {
-		var r uint16
-		r, err = db.Uint16()
-		if err != nil {
-			return
-		}
-		d[i] = rune(r)
-	}
-	k.DNSDomain = string(d)
-
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go
deleted file mode 100644
index 500e034..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Authenticator.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Package types provides Kerberos 5 data types.
-package types
-
-import (
-	"crypto/rand"
-	"fmt"
-	"math"
-	"math/big"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/asn1tools"
-	"gopkg.in/jcmturner/gokrb5.v7/iana"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/asnAppTag"
-)
-
-/*Authenticator   ::= [APPLICATION 2] SEQUENCE  {
-authenticator-vno       [0] INTEGER (5),
-crealm                  [1] Realm,
-cname                   [2] PrincipalName,
-cksum                   [3] Checksum OPTIONAL,
-cusec                   [4] Microseconds,
-ctime                   [5] KerberosTime,
-subkey                  [6] EncryptionKey OPTIONAL,
-seq-number              [7] UInt32 OPTIONAL,
-authorization-data      [8] AuthorizationData OPTIONAL
-}
-
-   cksum
-      This field contains a checksum of the application data that
-      accompanies the KRB_AP_REQ, computed using a key usage value of 10
-      in normal application exchanges, or 6 when used in the TGS-REQ
-      PA-TGS-REQ AP-DATA field.
-
-*/
-
-// Authenticator - A record containing information that can be shown to have been recently generated using the session key known only by the client and server.
-// https://tools.ietf.org/html/rfc4120#section-5.5.1
-type Authenticator struct {
-	AVNO              int               `asn1:"explicit,tag:0"`
-	CRealm            string            `asn1:"generalstring,explicit,tag:1"`
-	CName             PrincipalName     `asn1:"explicit,tag:2"`
-	Cksum             Checksum          `asn1:"explicit,optional,tag:3"`
-	Cusec             int               `asn1:"explicit,tag:4"`
-	CTime             time.Time         `asn1:"generalized,explicit,tag:5"`
-	SubKey            EncryptionKey     `asn1:"explicit,optional,tag:6"`
-	SeqNumber         int64             `asn1:"explicit,optional,tag:7"`
-	AuthorizationData AuthorizationData `asn1:"explicit,optional,tag:8"`
-}
-
-// NewAuthenticator creates a new Authenticator.
-func NewAuthenticator(realm string, cname PrincipalName) (Authenticator, error) {
-	seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
-	if err != nil {
-		return Authenticator{}, err
-	}
-	t := time.Now().UTC()
-	return Authenticator{
-		AVNO:      iana.PVNO,
-		CRealm:    realm,
-		CName:     cname,
-		Cksum:     Checksum{},
-		Cusec:     int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
-		CTime:     t,
-		SeqNumber: seq.Int64(),
-	}, nil
-}
-
-// GenerateSeqNumberAndSubKey sets the Authenticator's sequence number and subkey.
-func (a *Authenticator) GenerateSeqNumberAndSubKey(keyType int32, keySize int) error {
-	seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
-	if err != nil {
-		return err
-	}
-	a.SeqNumber = seq.Int64()
-	//Generate subkey value
-	sk := make([]byte, keySize, keySize)
-	rand.Read(sk)
-	a.SubKey = EncryptionKey{
-		KeyType:  keyType,
-		KeyValue: sk,
-	}
-	return nil
-}
-
-// Unmarshal bytes into the Authenticator.
-func (a *Authenticator) Unmarshal(b []byte) error {
-	_, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.Authenticator))
-	return err
-}
-
-// Marshal the Authenticator.
-func (a *Authenticator) Marshal() ([]byte, error) {
-	b, err := asn1.Marshal(*a)
-	if err != nil {
-		return nil, err
-	}
-	b = asn1tools.AddASNAppTag(b, asnAppTag.Authenticator)
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/AuthorizationData.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/AuthorizationData.go
deleted file mode 100644
index c944800..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/AuthorizationData.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package types
-
-import (
-	"github.com/jcmturner/gofork/encoding/asn1"
-)
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.2.6
-
-/*
-AuthorizationData
-
--- NOTE: AuthorizationData is always used as an OPTIONAL field and
--- should not be empty.
-AuthorizationData       ::= SEQUENCE OF SEQUENCE {
-ad-type         [0] Int32,
-ad-data         [1] OCTET STRING
-}
-
-ad-data
-This field contains authorization data to be interpreted according
-to the value of the corresponding ad-type field.
-
-ad-type
-	This field specifies the format for the ad-data subfield.  All
-negative values are reserved for local use.  Non-negative values
-are reserved for registered use.
-
-Each sequence of type and data is referred to as an authorization
-element.  Elements MAY be application specific; however, there is a
-common set of recursive elements that should be understood by all
-implementations.  These elements contain other elements embedded
-within them, and the interpretation of the encapsulating element
-determines which of the embedded elements must be interpreted, and
-which may be ignored.
-
-These common authorization data elements are recursively defined,
-meaning that the ad-data for these types will itself contain a
-sequence of authorization data whose interpretation is affected by
-the encapsulating element.  Depending on the meaning of the
-encapsulating element, the encapsulated elements may be ignored,
-might be interpreted as issued directly by the KDC, or might be
-stored in a separate plaintext part of the ticket.  The types of the
-encapsulating elements are specified as part of the Kerberos
-specification because the behavior based on these values should be
-understood across implementations, whereas other elements need only
-be understood by the applications that they affect.
-
-Authorization data elements are considered critical if present in a
-ticket or authenticator.  If an unknown authorization data element
-type is received by a server either in an AP-REQ or in a ticket
-contained in an AP-REQ, then, unless it is encapsulated in a known
-authorization data element amending the criticality of the elements
-it contains, authentication MUST fail.  Authorization data is
-intended to restrict the use of a ticket.  If the service cannot
-determine whether the restriction applies to that service, then a
-security weakness may result if the ticket can be used for that
-service.  Authorization elements that are optional can be enclosed in
-an AD-IF-RELEVANT element.
-
-In the definitions that follow, the value of the ad-type for the
-element will be specified as the least significant part of the
-subsection number, and the value of the ad-data will be as shown in
-the ASN.1 structure that follows the subsection heading.
-
-   Contents of ad-data                ad-type
-
-   DER encoding of AD-IF-RELEVANT        1
-
-   DER encoding of AD-KDCIssued          4
-
-   DER encoding of AD-AND-OR             5
-
-   DER encoding of AD-MANDATORY-FOR-KDC  8
-
-*/
-
-// AuthorizationData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6
-type AuthorizationData []AuthorizationDataEntry
-
-// AuthorizationDataEntry implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6
-type AuthorizationDataEntry struct {
-	ADType int32  `asn1:"explicit,tag:0"`
-	ADData []byte `asn1:"explicit,tag:1"`
-}
-
-// ADIfRelevant implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.1
-type ADIfRelevant AuthorizationData
-
-// ADKDCIssued implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.2
-type ADKDCIssued struct {
-	ADChecksum Checksum          `asn1:"explicit,tag:0"`
-	IRealm     string            `asn1:"optional,generalstring,explicit,tag:1"`
-	Isname     PrincipalName     `asn1:"optional,explicit,tag:2"`
-	Elements   AuthorizationData `asn1:"explicit,tag:3"`
-}
-
-// ADAndOr implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.3
-type ADAndOr struct {
-	ConditionCount int32             `asn1:"explicit,tag:0"`
-	Elements       AuthorizationData `asn1:"explicit,tag:1"`
-}
-
-// ADMandatoryForKDC implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.4
-type ADMandatoryForKDC AuthorizationData
-
-// Unmarshal bytes into the ADKDCIssued.
-func (a *ADKDCIssued) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Unmarshal bytes into the AuthorizationData.
-func (a *AuthorizationData) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Unmarshal bytes into the AuthorizationDataEntry.
-func (a *AuthorizationDataEntry) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go
deleted file mode 100644
index 7e8b4ab..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/Cryptosystem.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package types
-
-import (
-	"github.com/jcmturner/gofork/encoding/asn1"
-)
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.2.9
-
-// EncryptedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9
-type EncryptedData struct {
-	EType  int32  `asn1:"explicit,tag:0"`
-	KVNO   int    `asn1:"explicit,optional,tag:1"`
-	Cipher []byte `asn1:"explicit,tag:2"`
-}
-
-// EncryptionKey implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9
-// AKA KeyBlock
-type EncryptionKey struct {
-	KeyType  int32  `asn1:"explicit,tag:0"`
-	KeyValue []byte `asn1:"explicit,tag:1"`
-}
-
-// Checksum implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9
-type Checksum struct {
-	CksumType int32  `asn1:"explicit,tag:0"`
-	Checksum  []byte `asn1:"explicit,tag:1"`
-}
-
-// Unmarshal bytes into the EncryptedData.
-func (a *EncryptedData) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Marshal the EncryptedData.
-func (a *EncryptedData) Marshal() ([]byte, error) {
-	edb, err := asn1.Marshal(*a)
-	if err != nil {
-		return edb, err
-	}
-	return edb, nil
-}
-
-// Unmarshal bytes into the EncryptionKey.
-func (a *EncryptionKey) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Unmarshal bytes into the Checksum.
-func (a *Checksum) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go
deleted file mode 100644
index 2f6a5a7..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/HostAddress.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package types
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.2.5
-
-import (
-	"bytes"
-	"fmt"
-	"net"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/addrtype"
-)
-
-/*
-HostAddress and HostAddresses
-
-HostAddress     ::= SEQUENCE  {
-	addr-type       [0] Int32,
-	address         [1] OCTET STRING
-}
-
--- NOTE: HostAddresses is always used as an OPTIONAL field and
--- should not be empty.
-HostAddresses   -- NOTE: subtly different from rfc1510,
-		-- but has a value mapping and encodes the same
-	::= SEQUENCE OF HostAddress
-
-The host address encodings consist of two fields:
-
-addr-type
-	This field specifies the type of address that follows.  Pre-
-	defined values for this field are specified in Section 7.5.3.
-
-address
-	This field encodes a single address of type addr-type.
-*/
-
-// HostAddresses implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5
-type HostAddresses []HostAddress
-
-// HostAddress implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5
-type HostAddress struct {
-	AddrType int32  `asn1:"explicit,tag:0"`
-	Address  []byte `asn1:"explicit,tag:1"`
-}
-
-// GetHostAddress returns a HostAddress struct from a string in the format <hostname>:<port>
-func GetHostAddress(s string) (HostAddress, error) {
-	var h HostAddress
-	cAddr, _, err := net.SplitHostPort(s)
-	if err != nil {
-		return h, fmt.Errorf("invalid format of client address: %v", err)
-	}
-	ip := net.ParseIP(cAddr)
-	var ht int32
-	if ip.To4() != nil {
-		ht = addrtype.IPv4
-		ip = ip.To4()
-	} else if ip.To16() != nil {
-		ht = addrtype.IPv6
-		ip = ip.To16()
-	} else {
-		return h, fmt.Errorf("could not determine client's address types: %v", err)
-	}
-	h = HostAddress{
-		AddrType: ht,
-		Address:  ip,
-	}
-	return h, nil
-}
-
-// GetAddress returns a string representation of the HostAddress.
-func (h *HostAddress) GetAddress() (string, error) {
-	var b []byte
-	_, err := asn1.Unmarshal(h.Address, &b)
-	return string(b), err
-}
-
-// LocalHostAddresses returns a HostAddresses struct for the local machines interface IP addresses.
-func LocalHostAddresses() (ha HostAddresses, err error) {
-	ifs, err := net.Interfaces()
-	if err != nil {
-		return
-	}
-	for _, iface := range ifs {
-		if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 {
-			// Interface is either loopback of not up
-			continue
-		}
-		addrs, err := iface.Addrs()
-		if err != nil {
-			continue
-		}
-		for _, addr := range addrs {
-			var ip net.IP
-			switch v := addr.(type) {
-			case *net.IPNet:
-				ip = v.IP
-			case *net.IPAddr:
-				ip = v.IP
-			}
-			var a HostAddress
-			if ip.To16() == nil {
-				//neither IPv4 or IPv6
-				continue
-			}
-			if ip.To4() != nil {
-				//Is IPv4
-				a.AddrType = addrtype.IPv4
-				a.Address = ip.To4()
-			} else {
-				a.AddrType = addrtype.IPv6
-				a.Address = ip.To16()
-			}
-			ha = append(ha, a)
-		}
-	}
-	return ha, nil
-}
-
-// HostAddressesFromNetIPs returns a HostAddresses type from a slice of net.IP
-func HostAddressesFromNetIPs(ips []net.IP) (ha HostAddresses) {
-	for _, ip := range ips {
-		ha = append(ha, HostAddressFromNetIP(ip))
-	}
-	return ha
-}
-
-// HostAddressFromNetIP returns a HostAddress type from a net.IP
-func HostAddressFromNetIP(ip net.IP) HostAddress {
-	if ip.To4() != nil {
-		//Is IPv4
-		return HostAddress{
-			AddrType: addrtype.IPv4,
-			Address:  ip.To4(),
-		}
-	}
-	return HostAddress{
-		AddrType: addrtype.IPv6,
-		Address:  ip.To16(),
-	}
-}
-
-// HostAddressesEqual tests if two HostAddress slices are equal.
-func HostAddressesEqual(h, a []HostAddress) bool {
-	if len(h) != len(a) {
-		return false
-	}
-	for _, e := range a {
-		var found bool
-		for _, i := range h {
-			if e.Equal(i) {
-				found = true
-				break
-			}
-		}
-		if !found {
-			return false
-		}
-	}
-	return true
-}
-
-// HostAddressesContains tests if a HostAddress is contained in a HostAddress slice.
-func HostAddressesContains(h []HostAddress, a HostAddress) bool {
-	for _, e := range h {
-		if e.Equal(a) {
-			return true
-		}
-	}
-	return false
-}
-
-// Equal tests if the HostAddress is equal to another HostAddress provided.
-func (h *HostAddress) Equal(a HostAddress) bool {
-	if h.AddrType != a.AddrType {
-		return false
-	}
-	return bytes.Equal(h.Address, a.Address)
-}
-
-// Contains tests if a HostAddress is contained within the HostAddresses struct.
-func (h *HostAddresses) Contains(a HostAddress) bool {
-	for _, e := range *h {
-		if e.Equal(a) {
-			return true
-		}
-	}
-	return false
-}
-
-// Equal tests if a HostAddress slice is equal to the HostAddresses struct.
-func (h *HostAddresses) Equal(a []HostAddress) bool {
-	if len(*h) != len(a) {
-		return false
-	}
-	for _, e := range a {
-		if !h.Contains(e) {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go
deleted file mode 100644
index bd75d5b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/KerberosFlags.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package types
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.2.8
-
-import (
-	"github.com/jcmturner/gofork/encoding/asn1"
-)
-
-/*
-KerberosFlags
-
-For several message types, a specific constrained bit string type,
-KerberosFlags, is used.
-
-KerberosFlags   ::= BIT STRING (SIZE (32..MAX))
--- minimum number of bits shall be sent,
--- but no fewer than 32
-
-Compatibility note: The following paragraphs describe a change from
-the RFC 1510 description of bit strings that would result in
-incompatility in the case of an implementation that strictly
-conformed to ASN.1 DER and RFC 1510.
-
-ASN.1 bit strings have multiple uses.  The simplest use of a bit
-string is to contain a vector of bits, with no particular meaning
-attached to individual bits.  This vector of bits is not necessarily
-a multiple of eight bits long.  The use in Kerberos of a bit string
-as a compact boolean vector wherein each element has a distinct
-meaning poses some problems.  The natural notation for a compact
-boolean vector is the ASN.1 "NamedBit" notation, and the DER require
-that encodings of a bit string using "NamedBit" notation exclude any
-trailing zero bits.  This truncation is easy to neglect, especially
-given C language implementations that naturally choose to store
-boolean vectors as 32-bit integers.
-
-For example, if the notation for KDCOptions were to include the
-"NamedBit" notation, as in RFC 1510, and a KDCOptions value to be
-encoded had only the "forwardable" (bit number one) bit set, the DER
-encoding MUST include only two bits: the first reserved bit
-("reserved", bit number zero, value zero) and the one-valued bit (bit
-number one) for "forwardable".
-
-Most existing implementations of Kerberos unconditionally send 32
-bits on the wire when encoding bit strings used as boolean vectors.
-This behavior violates the ASN.1 syntax used for flag values in RFC
-1510, but it occurs on such a widely installed base that the protocol
-description is being modified to accommodate it.
-
-Consequently, this document removes the "NamedBit" notations for
-individual bits, relegating them to comments.  The size constraint on
-the KerberosFlags type requires that at least 32 bits be encoded at
-all times, though a lenient implementation MAY choose to accept fewer
-than 32 bits and to treat the missing bits as set to zero.
-
-Currently, no uses of KerberosFlags specify more than 32 bits' worth
-of flags, although future revisions of this document may do so.  When
-more than 32 bits are to be transmitted in a KerberosFlags value,
-future revisions to this document will likely specify that the
-smallest number of bits needed to encode the highest-numbered one-
-valued bit should be sent.  This is somewhat similar to the DER
-encoding of a bit string that is declared with the "NamedBit"
-notation.
-*/
-
-// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags.
-func NewKrbFlags() asn1.BitString {
-	f := asn1.BitString{}
-	f.Bytes = make([]byte, 4)
-	f.BitLength = len(f.Bytes) * 8
-	return f
-}
-
-// SetFlags sets the flags of an ASN1 BitString.
-func SetFlags(f *asn1.BitString, j []int) {
-	for _, i := range j {
-		SetFlag(f, i)
-	}
-}
-
-// SetFlag sets a flag in an ASN1 BitString.
-func SetFlag(f *asn1.BitString, i int) {
-	for l := len(f.Bytes); l < 4; l++ {
-		(*f).Bytes = append((*f).Bytes, byte(0))
-		(*f).BitLength = len((*f).Bytes) * 8
-	}
-	//Which byte?
-	b := i / 8
-	//Which bit in byte
-	p := uint(7 - (i - 8*b))
-	(*f).Bytes[b] = (*f).Bytes[b] | (1 << p)
-}
-
-// UnsetFlags unsets flags in an ASN1 BitString.
-func UnsetFlags(f *asn1.BitString, j []int) {
-	for _, i := range j {
-		UnsetFlag(f, i)
-	}
-}
-
-// UnsetFlag unsets a flag in an ASN1 BitString.
-func UnsetFlag(f *asn1.BitString, i int) {
-	for l := len(f.Bytes); l < 4; l++ {
-		(*f).Bytes = append((*f).Bytes, byte(0))
-		(*f).BitLength = len((*f).Bytes) * 8
-	}
-	//Which byte?
-	b := i / 8
-	//Which bit in byte
-	p := uint(7 - (i - 8*b))
-	(*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p)
-}
-
-// IsFlagSet tests if a flag is set in the ASN1 BitString.
-func IsFlagSet(f *asn1.BitString, i int) bool {
-	//Which byte?
-	b := i / 8
-	//Which bit in byte
-	p := uint(7 - (i - 8*b))
-	if (*f).Bytes[b]&(1<<p) != 0 {
-		return true
-	}
-	return false
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/PAData.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/PAData.go
deleted file mode 100644
index 484ec5b..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/PAData.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package types
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.2.7
-import (
-	"fmt"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"gopkg.in/jcmturner/gokrb5.v7/iana/patype"
-)
-
-// PAData implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7
-type PAData struct {
-	PADataType  int32  `asn1:"explicit,tag:1"`
-	PADataValue []byte `asn1:"explicit,tag:2"`
-}
-
-// PADataSequence implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7
-type PADataSequence []PAData
-
-// MethodData implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.9.1
-type MethodData []PAData
-
-// PAEncTimestamp implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.2
-type PAEncTimestamp EncryptedData
-
-// PAEncTSEnc implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.2
-type PAEncTSEnc struct {
-	PATimestamp time.Time `asn1:"generalized,explicit,tag:0"`
-	PAUSec      int       `asn1:"explicit,optional,tag:1"`
-}
-
-// Contains tests if a PADataSequence contains PA Data of a certain type.
-func (pas *PADataSequence) Contains(patype int32) bool {
-	for _, pa := range *pas {
-		if pa.PADataType == patype {
-			return true
-		}
-	}
-	return false
-}
-
-// GetPAEncTSEncAsnMarshalled returns the bytes of a PAEncTSEnc.
-func GetPAEncTSEncAsnMarshalled() ([]byte, error) {
-	t := time.Now().UTC()
-	p := PAEncTSEnc{
-		PATimestamp: t,
-		PAUSec:      int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
-	}
-	b, err := asn1.Marshal(p)
-	if err != nil {
-		return b, fmt.Errorf("error mashaling PAEncTSEnc: %v", err)
-	}
-	return b, nil
-}
-
-// ETypeInfoEntry implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.4
-type ETypeInfoEntry struct {
-	EType int32  `asn1:"explicit,tag:0"`
-	Salt  []byte `asn1:"explicit,optional,tag:1"`
-}
-
-// ETypeInfo implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.4
-type ETypeInfo []ETypeInfoEntry
-
-// ETypeInfo2Entry implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.5
-type ETypeInfo2Entry struct {
-	EType     int32  `asn1:"explicit,tag:0"`
-	Salt      string `asn1:"explicit,optional,generalstring,tag:1"`
-	S2KParams []byte `asn1:"explicit,optional,tag:2"`
-}
-
-// ETypeInfo2 implements RFC 4120 types: https://tools.ietf.org/html/rfc4120#section-5.2.7.5
-type ETypeInfo2 []ETypeInfo2Entry
-
-// PAReqEncPARep PA Data Type
-type PAReqEncPARep struct {
-	ChksumType int32  `asn1:"explicit,tag:0"`
-	Chksum     []byte `asn1:"explicit,tag:1"`
-}
-
-// Unmarshal bytes into the PAData
-func (pa *PAData) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, pa)
-	return err
-}
-
-// Unmarshal bytes into the PADataSequence
-func (pas *PADataSequence) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, pas)
-	return err
-}
-
-// Unmarshal bytes into the PAReqEncPARep
-func (pa *PAReqEncPARep) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, pa)
-	return err
-}
-
-// Unmarshal bytes into the PAEncTimestamp
-func (pa *PAEncTimestamp) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, pa)
-	return err
-}
-
-// Unmarshal bytes into the PAEncTSEnc
-func (pa *PAEncTSEnc) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, pa)
-	return err
-}
-
-// Unmarshal bytes into the ETypeInfo
-func (a *ETypeInfo) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Unmarshal bytes into the ETypeInfoEntry
-func (a *ETypeInfoEntry) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Unmarshal bytes into the ETypeInfo2
-func (a *ETypeInfo2) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// Unmarshal bytes into the ETypeInfo2Entry
-func (a *ETypeInfo2Entry) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
-
-// GetETypeInfo returns an ETypeInfo from the PAData.
-func (pa *PAData) GetETypeInfo() (d ETypeInfo, err error) {
-	if pa.PADataType != patype.PA_ETYPE_INFO {
-		err = fmt.Errorf("PAData does not contain PA EType Info data. TypeID Expected: %v; Actual: %v", patype.PA_ETYPE_INFO, pa.PADataType)
-		return
-	}
-	_, err = asn1.Unmarshal(pa.PADataValue, &d)
-	return
-}
-
-// GetETypeInfo2 returns an ETypeInfo2 from the PAData.
-func (pa *PAData) GetETypeInfo2() (d ETypeInfo2, err error) {
-	if pa.PADataType != patype.PA_ETYPE_INFO2 {
-		err = fmt.Errorf("PAData does not contain PA EType Info 2 data. TypeID Expected: %v; Actual: %v", patype.PA_ETYPE_INFO2, pa.PADataType)
-		return
-	}
-	_, err = asn1.Unmarshal(pa.PADataValue, &d)
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/PrincipalName.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/PrincipalName.go
deleted file mode 100644
index fa6d3ce..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/PrincipalName.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package types
-
-import (
-	"strings"
-
-	"gopkg.in/jcmturner/gokrb5.v7/iana/nametype"
-)
-
-// Reference: https://www.ietf.org/rfc/rfc4120.txt
-// Section: 5.2.2
-
-// PrincipalName implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.2
-type PrincipalName struct {
-	NameType   int32    `asn1:"explicit,tag:0"`
-	NameString []string `asn1:"generalstring,explicit,tag:1"`
-}
-
-// NewPrincipalName creates a new PrincipalName from the name type int32 and name string provided.
-func NewPrincipalName(ntype int32, spn string) PrincipalName {
-	return PrincipalName{
-		NameType:   ntype,
-		NameString: strings.Split(spn, "/"),
-	}
-}
-
-// GetSalt returns a salt derived from the PrincipalName.
-func (pn PrincipalName) GetSalt(realm string) string {
-	var sb []byte
-	sb = append(sb, realm...)
-	for _, n := range pn.NameString {
-		sb = append(sb, n...)
-	}
-	return string(sb)
-}
-
-// Equal tests if the PrincipalName is equal to the one provided.
-func (pn PrincipalName) Equal(n PrincipalName) bool {
-	//https://tools.ietf.org/html/rfc4120#section-6.2 - the name type is not significant when checking for equivalence
-	for i, s := range pn.NameString {
-		if n.NameString[i] != s {
-			return false
-		}
-	}
-	return true
-}
-
-// PrincipalNameString returns the PrincipalName in string form.
-func (pn PrincipalName) PrincipalNameString() string {
-	return strings.Join(pn.NameString, "/")
-}
-
-// ParseSPNString will parse a string in the format <service>/<name>@<realm>
-// a PrincipalName type will be returned with the name type set to KRB_NT_PRINCIPAL(1)
-// and the realm will be returned as a string. If the "@<realm>" suffix
-// is not included in the SPN then the value of realm string returned will be ""
-func ParseSPNString(spn string) (pn PrincipalName, realm string) {
-	if strings.Contains(spn, "@") {
-		s := strings.Split(spn, "@")
-		realm = s[len(s)-1]
-		spn = strings.TrimSuffix(spn, "@"+realm)
-	}
-	pn = NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
-	return
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go b/vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go
deleted file mode 100644
index 19e9f49..0000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v7/types/TypedData.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package types
-
-import "github.com/jcmturner/gofork/encoding/asn1"
-
-// TypedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1
-type TypedData struct {
-	DataType  int32  `asn1:"explicit,tag:0"`
-	DataValue []byte `asn1:"optional,explicit,tag:1"`
-}
-
-// TypedDataSequence implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1
-type TypedDataSequence []TypedData
-
-// Unmarshal bytes into the TypedDataSequence.
-func (a *TypedDataSequence) Unmarshal(b []byte) error {
-	_, err := asn1.Unmarshal(b, a)
-	return err
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE b/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go
deleted file mode 100644
index 2b375da..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/claims.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package mstypes
-
-import (
-	"bytes"
-	"errors"
-
-	"gopkg.in/jcmturner/rpc.v1/ndr"
-)
-
-// Compression format assigned numbers.
-const (
-	CompressionFormatNone       uint16 = 0
-	CompressionFormatLZNT1      uint16 = 2
-	CompressionFormatXPress     uint16 = 3
-	CompressionFormatXPressHuff uint16 = 4
-)
-
-// ClaimsSourceTypeAD https://msdn.microsoft.com/en-us/library/hh553809.aspx
-const ClaimsSourceTypeAD uint16 = 1
-
-// Claim Type assigned numbers
-const (
-	ClaimTypeIDInt64    uint16 = 1
-	ClaimTypeIDUInt64   uint16 = 2
-	ClaimTypeIDString   uint16 = 3
-	ClaimsTypeIDBoolean uint16 = 6
-)
-
-// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx
-type ClaimsBlob struct {
-	Size        uint32
-	EncodedBlob EncodedBlob
-}
-
-// EncodedBlob are the bytes of the encoded Claims
-type EncodedBlob []byte
-
-// Size returns the size of the bytes of the encoded Claims
-func (b EncodedBlob) Size(c interface{}) int {
-	cb := c.(ClaimsBlob)
-	return int(cb.Size)
-}
-
-// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx
-type ClaimsSetMetadata struct {
-	ClaimsSetSize             uint32
-	ClaimsSetBytes            []byte `ndr:"pointer,conformant"`
-	CompressionFormat         uint16 // Enum see constants for options
-	UncompressedClaimsSetSize uint32
-	ReservedType              uint16
-	ReservedFieldSize         uint32
-	ReservedField             []byte `ndr:"pointer,conformant"`
-}
-
-// ClaimsSet reads the ClaimsSet type from the NDR encoded ClaimsSetBytes in the ClaimsSetMetadata
-func (m *ClaimsSetMetadata) ClaimsSet() (c ClaimsSet, err error) {
-	if len(m.ClaimsSetBytes) < 1 {
-		err = errors.New("no bytes available for ClaimsSet")
-		return
-	}
-	// TODO switch statement to decompress ClaimsSetBytes
-	if m.CompressionFormat != CompressionFormatNone {
-		err = errors.New("compressed ClaimsSet not currently supported")
-		return
-	}
-	dec := ndr.NewDecoder(bytes.NewReader(m.ClaimsSetBytes))
-	err = dec.Decode(&c)
-	return
-}
-
-// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx
-type ClaimsSet struct {
-	ClaimsArrayCount  uint32
-	ClaimsArrays      []ClaimsArray `ndr:"pointer,conformant"`
-	ReservedType      uint16
-	ReservedFieldSize uint32
-	ReservedField     []byte `ndr:"pointer,conformant"`
-}
-
-// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx
-type ClaimsArray struct {
-	ClaimsSourceType uint16
-	ClaimsCount      uint32
-	ClaimEntries     []ClaimEntry `ndr:"pointer,conformant"`
-}
-
-// ClaimEntry is a NDR union that implements https://msdn.microsoft.com/en-us/library/hh536374.aspx
-type ClaimEntry struct {
-	ID         string           `ndr:"pointer,conformant,varying"`
-	Type       uint16           `ndr:"unionTag"`
-	TypeInt64  ClaimTypeInt64   `ndr:"unionField"`
-	TypeUInt64 ClaimTypeUInt64  `ndr:"unionField"`
-	TypeString ClaimTypeString  `ndr:"unionField"`
-	TypeBool   ClaimTypeBoolean `ndr:"unionField"`
-}
-
-// SwitchFunc is the ClaimEntry union field selection function
-func (u ClaimEntry) SwitchFunc(_ interface{}) string {
-	switch u.Type {
-	case ClaimTypeIDInt64:
-		return "TypeInt64"
-	case ClaimTypeIDUInt64:
-		return "TypeUInt64"
-	case ClaimTypeIDString:
-		return "TypeString"
-	case ClaimsTypeIDBoolean:
-		return "TypeBool"
-	}
-	return ""
-}
-
-// ClaimTypeInt64 is a claim of type int64
-type ClaimTypeInt64 struct {
-	ValueCount uint32
-	Value      []int64 `ndr:"pointer,conformant"`
-}
-
-// ClaimTypeUInt64 is a claim of type uint64
-type ClaimTypeUInt64 struct {
-	ValueCount uint32
-	Value      []uint64 `ndr:"pointer,conformant"`
-}
-
-// ClaimTypeString is a claim of type string
-type ClaimTypeString struct {
-	ValueCount uint32
-	Value      []LPWSTR `ndr:"pointer,conformant"`
-}
-
-// ClaimTypeBoolean is a claim of type bool
-type ClaimTypeBoolean struct {
-	ValueCount uint32
-	Value      []bool `ndr:"pointer,conformant"`
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go
deleted file mode 100644
index 62cac28..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/common.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package mstypes
-
-// LPWSTR implements https://msdn.microsoft.com/en-us/library/cc230355.aspx
-type LPWSTR struct {
-	Value string `ndr:"pointer,conformant,varying"`
-}
-
-func (s *LPWSTR) String() string {
-	return s.Value
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go
deleted file mode 100644
index 5cc952f..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/filetime.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Package mstypes implements representations of Microsoft types
-package mstypes
-
-import (
-	"time"
-)
-
-/*
-FILETIME is a windows data structure.
-Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx
-It contains two parts that are 32bit integers:
-	dwLowDateTime
-	dwHighDateTime
-We need to combine these two into one 64bit integer.
-This gives the number of 100 nano second period from January 1, 1601, Coordinated Universal Time (UTC)
-*/
-
-const unixEpochDiff = 116444736000000000
-
-// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx
-type FileTime struct {
-	LowDateTime  uint32
-	HighDateTime uint32
-}
-
-// Time return a golang Time type from the FileTime
-func (ft FileTime) Time() time.Time {
-	ns := (ft.MSEpoch() - unixEpochDiff) * 100
-	return time.Unix(0, int64(ns)).UTC()
-}
-
-// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100 nano second periods elapsed from January 1, 1601 UTC.
-func (ft FileTime) MSEpoch() int64 {
-	return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime)
-}
-
-// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC.
-func (ft FileTime) Unix() int64 {
-	return (ft.MSEpoch() - unixEpochDiff) / 10000000
-}
-
-// GetFileTime returns a FileTime type from the provided Golang Time type.
-func GetFileTime(t time.Time) FileTime {
-	ns := t.UnixNano()
-	fp := (ns / 100) + unixEpochDiff
-	hd := fp >> 32
-	ld := fp - (hd << 32)
-	return FileTime{
-		LowDateTime:  uint32(ld),
-		HighDateTime: uint32(hd),
-	}
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go
deleted file mode 100644
index 7915137..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/group_membership.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package mstypes
-
-// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx
-// RelativeID : A 32-bit unsigned integer that contains the RID of a particular group.
-// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES
-type GroupMembership struct {
-	RelativeID uint32
-	Attributes uint32
-}
-
-// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx
-// DomainId: A SID structure that contains the SID for the domain.This member is used in conjunction with the GroupIds members to create group SIDs for the device.
-// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs.
-// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount.
-type DomainGroupMembership struct {
-	DomainID   RPCSID `ndr:"pointer"`
-	GroupCount uint32
-	GroupIDs   []GroupMembership `ndr:"pointer,conformant"` // Size is value of GroupCount
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go
deleted file mode 100644
index 61ac39b..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/kerb_sid_and_attributes.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package mstypes
-
-// Attributes of a security group membership and can be combined by using the bitwise OR operation.
-// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision.
-const (
-	SEGroupMandatory        = 31
-	SEGroupEnabledByDefault = 30
-	SEGroupEnabled          = 29
-	SEGroupOwner            = 28
-	SEGroupResource         = 2
-	//All other bits MUST be set to zero and MUST be  ignored on receipt.
-)
-
-// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx
-type KerbSidAndAttributes struct {
-	SID        RPCSID `ndr:"pointer"` // A pointer to an RPC_SID structure.
-	Attributes uint32
-}
-
-// SetFlag sets a flag in a uint32 attribute value.
-func SetFlag(a *uint32, i uint) {
-	*a = *a | (1 << (31 - i))
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go
deleted file mode 100644
index 24495bc..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/reader.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package mstypes
-
-import (
-	"bufio"
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-// Byte sizes of primitive types
-const (
-	SizeBool   = 1
-	SizeChar   = 1
-	SizeUint8  = 1
-	SizeUint16 = 2
-	SizeUint32 = 4
-	SizeUint64 = 8
-	SizeEnum   = 2
-	SizeSingle = 4
-	SizeDouble = 8
-	SizePtr    = 4
-)
-
-// Reader reads simple byte stream data into a Go representations
-type Reader struct {
-	r *bufio.Reader // source of the data
-}
-
-// NewReader creates a new instance of a simple Reader.
-func NewReader(r io.Reader) *Reader {
-	reader := new(Reader)
-	reader.r = bufio.NewReader(r)
-	return reader
-}
-
-func (r *Reader) Read(p []byte) (n int, err error) {
-	return r.r.Read(p)
-}
-
-func (r *Reader) Uint8() (uint8, error) {
-	b, err := r.r.ReadByte()
-	if err != nil {
-		return uint8(0), err
-	}
-	return uint8(b), nil
-}
-
-func (r *Reader) Uint16() (uint16, error) {
-	b, err := r.ReadBytes(SizeUint16)
-	if err != nil {
-		return uint16(0), err
-	}
-	return binary.LittleEndian.Uint16(b), nil
-}
-
-func (r *Reader) Uint32() (uint32, error) {
-	b, err := r.ReadBytes(SizeUint32)
-	if err != nil {
-		return uint32(0), err
-	}
-	return binary.LittleEndian.Uint32(b), nil
-}
-
-func (r *Reader) Uint64() (uint64, error) {
-	b, err := r.ReadBytes(SizeUint64)
-	if err != nil {
-		return uint64(0), err
-	}
-	return binary.LittleEndian.Uint64(b), nil
-}
-
-func (r *Reader) FileTime() (f FileTime, err error) {
-	f.LowDateTime, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	f.HighDateTime, err = r.Uint32()
-	if err != nil {
-		return
-	}
-	return
-}
-
-// UTF16String returns a string that is UTF16 encoded in a byte slice. n is the number of bytes representing the string
-func (r *Reader) UTF16String(n int) (str string, err error) {
-	//Length divided by 2 as each run is 16bits = 2bytes
-	s := make([]rune, n/2, n/2)
-	for i := 0; i < len(s); i++ {
-		var u uint16
-		u, err = r.Uint16()
-		if err != nil {
-			return
-		}
-		s[i] = rune(u)
-	}
-	str = string(s)
-	return
-}
-
-// readBytes returns a number of bytes from the NDR byte stream.
-func (r *Reader) ReadBytes(n int) ([]byte, error) {
-	//TODO make this take an int64 as input to allow for larger values on all systems?
-	b := make([]byte, n, n)
-	m, err := r.r.Read(b)
-	if err != nil || m != n {
-		return b, fmt.Errorf("error reading bytes from stream: %v", err)
-	}
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go
deleted file mode 100644
index 4bf02e0..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/rpc_unicode_string.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package mstypes
-
-// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx
-type RPCUnicodeString struct {
-	Length        uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character.
-	MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length.
-	Value         string `ndr:"pointer,conformant,varying"`
-}
-
-// String returns the RPCUnicodeString string value
-func (r *RPCUnicodeString) String() string {
-	return r.Value
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go
deleted file mode 100644
index 98a9c5a..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/sid.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package mstypes
-
-import (
-	"encoding/binary"
-	"encoding/hex"
-	"fmt"
-)
-
-// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx
-type RPCSID struct {
-	Revision            uint8    // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01.
-	SubAuthorityCount   uint8    // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15.
-	IdentifierAuthority [6]byte  // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority.
-	SubAuthority        []uint32 `ndr:"conformant"` // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount.
-}
-
-// String returns the string representation of the RPC_SID.
-func (s *RPCSID) String() string {
-	var str string
-	b := append(make([]byte, 2, 2), s.IdentifierAuthority[:]...)
-	// For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx
-	i := binary.BigEndian.Uint64(b)
-	if i >= 4294967296 {
-		str = fmt.Sprintf("S-1-0x%s", hex.EncodeToString(s.IdentifierAuthority[:]))
-	} else {
-		str = fmt.Sprintf("S-1-%d", i)
-	}
-	for _, sub := range s.SubAuthority {
-		str = fmt.Sprintf("%s-%d", str, sub)
-	}
-	return str
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go b/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go
deleted file mode 100644
index fcf0a5d..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/mstypes/user_session_key.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package mstypes
-
-// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx
-type CypherBlock struct {
-	Data [8]byte // size = 8
-}
-
-// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx
-type UserSessionKey struct {
-	CypherBlock [2]CypherBlock // size = 2
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go
deleted file mode 100644
index 5e2def2..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/arrays.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package ndr
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-)
-
-// intFromTag returns an int that is a value in a struct tag key/value pair
-func intFromTag(tag reflect.StructTag, key string) (int, error) {
-	ndrTag := parseTags(tag)
-	d := 1
-	if n, ok := ndrTag.Map[key]; ok {
-		i, err := strconv.Atoi(n)
-		if err != nil {
-			return d, fmt.Errorf("invalid dimensions tag [%s]: %v", n, err)
-		}
-		d = i
-	}
-	return d, nil
-}
-
-// parseDimensions returns the a slice of the size of each dimension and type of the member at the deepest level.
-func parseDimensions(v reflect.Value) (l []int, tb reflect.Type) {
-	if v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}
-	t := v.Type()
-	if t.Kind() == reflect.Ptr {
-		t = t.Elem()
-	}
-	if t.Kind() != reflect.Array && t.Kind() != reflect.Slice {
-		return
-	}
-	l = append(l, v.Len())
-	if t.Elem().Kind() == reflect.Array || t.Elem().Kind() == reflect.Slice {
-		// contains array or slice
-		var m []int
-		m, tb = parseDimensions(v.Index(0))
-		l = append(l, m...)
-	} else {
-		tb = t.Elem()
-	}
-	return
-}
-
-// sliceDimensions returns the count of dimensions a slice has.
-func sliceDimensions(t reflect.Type) (d int, tb reflect.Type) {
-	if t.Kind() == reflect.Ptr {
-		t = t.Elem()
-	}
-	if t.Kind() == reflect.Slice {
-		d++
-		var n int
-		n, tb = sliceDimensions(t.Elem())
-		d += n
-	} else {
-		tb = t
-	}
-	return
-}
-
-// makeSubSlices is a deep recursive creation/initialisation of multi-dimensional slices.
-// Takes the reflect.Value of the 1st dimension and a slice of the lengths of the sub dimensions
-func makeSubSlices(v reflect.Value, l []int) {
-	ty := v.Type().Elem()
-	if ty.Kind() != reflect.Slice {
-		return
-	}
-	for i := 0; i < v.Len(); i++ {
-		s := reflect.MakeSlice(ty, l[0], l[0])
-		v.Index(i).Set(s)
-		// Are there more sub dimensions?
-		if len(l) > 1 {
-			makeSubSlices(v.Index(i), l[1:])
-		}
-	}
-	return
-}
-
-// multiDimensionalIndexPermutations returns all the permutations of the indexes of a multi-dimensional slice.
-// The input is a slice of integers that indicates the max size/length of each dimension
-func multiDimensionalIndexPermutations(l []int) (ps [][]int) {
-	z := make([]int, len(l), len(l)) // The zeros permutation
-	ps = append(ps, z)
-	// for each dimension, in reverse
-	for i := len(l) - 1; i >= 0; i-- {
-		ws := make([][]int, len(ps))
-		copy(ws, ps)
-		//create a permutation for each of the iterations of the current dimension
-		for j := 1; j <= l[i]-1; j++ {
-			// For each existing permutation
-			for _, p := range ws {
-				np := make([]int, len(p), len(p))
-				copy(np, p)
-				np[i] = j
-				ps = append(ps, np)
-			}
-		}
-	}
-	return
-}
-
-// precedingMax reads off the next conformant max value
-func (dec *Decoder) precedingMax() uint32 {
-	m := dec.conformantMax[0]
-	dec.conformantMax = dec.conformantMax[1:]
-	return m
-}
-
-// fillFixedArray establishes if the fixed array is uni or multi dimensional and then fills it.
-func (dec *Decoder) fillFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	l, t := parseDimensions(v)
-	if t.Kind() == reflect.String {
-		tag = reflect.StructTag(subStringArrayTag)
-	}
-	if len(l) < 1 {
-		return errors.New("could not establish dimensions of fixed array")
-	}
-	if len(l) == 1 {
-		err := dec.fillUniDimensionalFixedArray(v, tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill uni-dimensional fixed array: %v", err)
-		}
-		return nil
-	}
-	// Fixed array is multidimensional
-	ps := multiDimensionalIndexPermutations(l[:len(l)-1])
-	for _, p := range ps {
-		// Get current multi-dimensional index to fill
-		a := v
-		for _, i := range p {
-			a = a.Index(i)
-		}
-		// fill with the last dimension array
-		err := dec.fillUniDimensionalFixedArray(a, tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill dimension %v of multi-dimensional fixed array: %v", p, err)
-		}
-	}
-	return nil
-}
-
-// readUniDimensionalFixedArray reads an array (not slice) from the byte stream.
-func (dec *Decoder) fillUniDimensionalFixedArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	for i := 0; i < v.Len(); i++ {
-		err := dec.fill(v.Index(i), tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %d of fixed array: %v", i, err)
-		}
-	}
-	return nil
-}
-
-// fillConformantArray establishes if the conformant array is uni or multi dimensional and then fills the slice.
-func (dec *Decoder) fillConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	d, _ := sliceDimensions(v.Type())
-	if d > 1 {
-		err := dec.fillMultiDimensionalConformantArray(v, d, tag, def)
-		if err != nil {
-			return err
-		}
-	} else {
-		err := dec.fillUniDimensionalConformantArray(v, tag, def)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// fillUniDimensionalConformantArray fills the uni-dimensional slice value.
-func (dec *Decoder) fillUniDimensionalConformantArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	m := dec.precedingMax()
-	n := int(m)
-	a := reflect.MakeSlice(v.Type(), n, n)
-	for i := 0; i < n; i++ {
-		err := dec.fill(a.Index(i), tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %d of uni-dimensional conformant array: %v", i, err)
-		}
-	}
-	v.Set(a)
-	return nil
-}
-
-// fillMultiDimensionalConformantArray fills the multi-dimensional slice value provided from conformant array data.
-// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
-// method not to panic.
-func (dec *Decoder) fillMultiDimensionalConformantArray(v reflect.Value, d int, tag reflect.StructTag, def *[]deferedPtr) error {
-	// Read the max size of each dimensions from the ndr stream
-	l := make([]int, d, d)
-	for i := range l {
-		l[i] = int(dec.precedingMax())
-	}
-	// Initialise size of slices
-	//   Initialise the size of the 1st dimension
-	ty := v.Type()
-	v.Set(reflect.MakeSlice(ty, l[0], l[0]))
-	// Initialise the size of the other dimensions recursively
-	makeSubSlices(v, l[1:])
-
-	// Get all permutations of the indexes and go through each and fill
-	ps := multiDimensionalIndexPermutations(l)
-	for _, p := range ps {
-		// Get current multi-dimensional index to fill
-		a := v
-		for _, i := range p {
-			a = a.Index(i)
-		}
-		err := dec.fill(a, tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
-		}
-	}
-	return nil
-}
-
-// fillVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice.
-func (dec *Decoder) fillVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	d, t := sliceDimensions(v.Type())
-	if d > 1 {
-		err := dec.fillMultiDimensionalVaryingArray(v, t, d, tag, def)
-		if err != nil {
-			return err
-		}
-	} else {
-		err := dec.fillUniDimensionalVaryingArray(v, tag, def)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// fillUniDimensionalVaryingArray fills the uni-dimensional slice value.
-func (dec *Decoder) fillUniDimensionalVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	o, err := dec.readUint32()
-	if err != nil {
-		return fmt.Errorf("could not read offset of uni-dimensional varying array: %v", err)
-	}
-	s, err := dec.readUint32()
-	if err != nil {
-		return fmt.Errorf("could not establish actual count of uni-dimensional varying array: %v", err)
-	}
-	t := v.Type()
-	// Total size of the array is the offset in the index being passed plus the actual count of elements being passed.
-	n := int(s + o)
-	a := reflect.MakeSlice(t, n, n)
-	// Populate the array starting at the offset specified
-	for i := int(o); i < n; i++ {
-		err := dec.fill(a.Index(i), tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %d of uni-dimensional varying array: %v", i, err)
-		}
-	}
-	v.Set(a)
-	return nil
-}
-
-// fillMultiDimensionalVaryingArray fills the multi-dimensional slice value provided from varying array data.
-// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
-// method not to panic.
-func (dec *Decoder) fillMultiDimensionalVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error {
-	// Read the offset and actual count of each dimensions from the ndr stream
-	o := make([]int, d, d)
-	l := make([]int, d, d)
-	for i := range l {
-		off, err := dec.readUint32()
-		if err != nil {
-			return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err)
-		}
-		o[i] = int(off)
-		s, err := dec.readUint32()
-		if err != nil {
-			return fmt.Errorf("could not read size of dimension %d: %v", i+1, err)
-		}
-		l[i] = int(s) + int(off)
-	}
-	// Initialise size of slices
-	//   Initialise the size of the 1st dimension
-	ty := v.Type()
-	v.Set(reflect.MakeSlice(ty, l[0], l[0]))
-	// Initialise the size of the other dimensions recursively
-	makeSubSlices(v, l[1:])
-
-	// Get all permutations of the indexes and go through each and fill
-	ps := multiDimensionalIndexPermutations(l)
-	for _, p := range ps {
-		// Get current multi-dimensional index to fill
-		a := v
-		var os bool // should this permutation be skipped due to the offset of any of the dimensions?
-		for i, j := range p {
-			if j < o[i] {
-				os = true
-				break
-			}
-			a = a.Index(j)
-		}
-		if os {
-			// This permutation should be skipped as it is less than the offset for one of the dimensions.
-			continue
-		}
-		err := dec.fill(a, tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
-		}
-	}
-	return nil
-}
-
-// fillConformantVaryingArray establishes if the varying array is uni or multi dimensional and then fills the slice.
-func (dec *Decoder) fillConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	d, t := sliceDimensions(v.Type())
-	if d > 1 {
-		err := dec.fillMultiDimensionalConformantVaryingArray(v, t, d, tag, def)
-		if err != nil {
-			return err
-		}
-	} else {
-		err := dec.fillUniDimensionalConformantVaryingArray(v, tag, def)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// fillUniDimensionalConformantVaryingArray fills the uni-dimensional slice value.
-func (dec *Decoder) fillUniDimensionalConformantVaryingArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	m := dec.precedingMax()
-	o, err := dec.readUint32()
-	if err != nil {
-		return fmt.Errorf("could not read offset of uni-dimensional conformant varying array: %v", err)
-	}
-	s, err := dec.readUint32()
-	if err != nil {
-		return fmt.Errorf("could not establish actual count of uni-dimensional conformant varying array: %v", err)
-	}
-	if m < o+s {
-		return errors.New("max count is less than the offset plus actual count")
-	}
-	t := v.Type()
-	n := int(s)
-	a := reflect.MakeSlice(t, n, n)
-	for i := int(o); i < n; i++ {
-		err := dec.fill(a.Index(i), tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %d of uni-dimensional conformant varying array: %v", i, err)
-		}
-	}
-	v.Set(a)
-	return nil
-}
-
-// fillMultiDimensionalConformantVaryingArray fills the multi-dimensional slice value provided from conformant varying array data.
-// The number of dimensions must be specified. This must be less than or equal to the dimensions in the slice for this
-// method not to panic.
-func (dec *Decoder) fillMultiDimensionalConformantVaryingArray(v reflect.Value, t reflect.Type, d int, tag reflect.StructTag, def *[]deferedPtr) error {
-	// Read the offset and actual count of each dimensions from the ndr stream
-	m := make([]int, d, d)
-	for i := range m {
-		m[i] = int(dec.precedingMax())
-	}
-	o := make([]int, d, d)
-	l := make([]int, d, d)
-	for i := range l {
-		off, err := dec.readUint32()
-		if err != nil {
-			return fmt.Errorf("could not read offset of dimension %d: %v", i+1, err)
-		}
-		o[i] = int(off)
-		s, err := dec.readUint32()
-		if err != nil {
-			return fmt.Errorf("could not read actual count of dimension %d: %v", i+1, err)
-		}
-		if m[i] < int(s)+int(off) {
-			m[i] = int(s) + int(off)
-		}
-		l[i] = int(s)
-	}
-	// Initialise size of slices
-	//   Initialise the size of the 1st dimension
-	ty := v.Type()
-	v.Set(reflect.MakeSlice(ty, m[0], m[0]))
-	// Initialise the size of the other dimensions recursively
-	makeSubSlices(v, m[1:])
-
-	// Get all permutations of the indexes and go through each and fill
-	ps := multiDimensionalIndexPermutations(m)
-	for _, p := range ps {
-		// Get current multi-dimensional index to fill
-		a := v
-		var os bool // should this permutation be skipped due to the offset of any of the dimensions or max is higher than the actual count being passed
-		for i, j := range p {
-			if j < o[i] || j >= l[i] {
-				os = true
-				break
-			}
-			a = a.Index(j)
-		}
-		if os {
-			// This permutation should be skipped as it is less than the offset for one of the dimensions.
-			continue
-		}
-		err := dec.fill(a, tag, def)
-		if err != nil {
-			return fmt.Errorf("could not fill index %v of slice: %v", p, err)
-		}
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go
deleted file mode 100644
index 6157b4e..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/decoder.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Package ndr provides the ability to unmarshal NDR encoded byte steams into Go data structures
-package ndr
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"reflect"
-	"strings"
-)
-
-// Struct tag values
-const (
-	TagConformant = "conformant"
-	TagVarying    = "varying"
-	TagPointer    = "pointer"
-	TagPipe       = "pipe"
-)
-
-// Decoder unmarshals NDR byte stream data into a Go struct representation
-type Decoder struct {
-	r             *bufio.Reader // source of the data
-	size          int           // initial size of bytes in buffer
-	ch            CommonHeader  // NDR common header
-	ph            PrivateHeader // NDR private header
-	conformantMax []uint32      // conformant max values that were moved to the beginning of the structure
-	s             interface{}   // pointer to the structure being populated
-	current       []string      // keeps track of the current field being populated
-}
-
-type deferedPtr struct {
-	v   reflect.Value
-	tag reflect.StructTag
-}
-
-// NewDecoder creates a new instance of a NDR Decoder.
-func NewDecoder(r io.Reader) *Decoder {
-	dec := new(Decoder)
-	dec.r = bufio.NewReader(r)
-	dec.r.Peek(int(commonHeaderBytes)) // For some reason an operation is needed on the buffer to initialise it so Buffered() != 0
-	dec.size = dec.r.Buffered()
-	return dec
-}
-
-// Decode unmarshals the NDR encoded bytes into the pointer of a struct provided.
-func (dec *Decoder) Decode(s interface{}) error {
-	dec.s = s
-	err := dec.readCommonHeader()
-	if err != nil {
-		return err
-	}
-	err = dec.readPrivateHeader()
-	if err != nil {
-		return err
-	}
-	_, err = dec.r.Discard(4) //The next 4 bytes are an RPC unique pointer referent. We just skip these.
-	if err != nil {
-		return Errorf("unable to process byte stream: %v", err)
-	}
-
-	return dec.process(s, reflect.StructTag(""))
-}
-
-func (dec *Decoder) process(s interface{}, tag reflect.StructTag) error {
-	// Scan for conformant fields as their max counts are moved to the beginning
-	// http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_37
-	err := dec.scanConformantArrays(s, tag)
-	if err != nil {
-		return err
-	}
-	// Recursively fill the struct fields
-	var localDef []deferedPtr
-	err = dec.fill(s, tag, &localDef)
-	if err != nil {
-		return Errorf("could not decode: %v", err)
-	}
-	// Read any deferred referents associated with pointers
-	for _, p := range localDef {
-		err = dec.process(p.v, p.tag)
-		if err != nil {
-			return fmt.Errorf("could not decode deferred referent: %v", err)
-		}
-	}
-	return nil
-}
-
-// scanConformantArrays scans the structure for embedded conformant fields and captures the maximum element counts for
-// dimensions of the array that are moved to the beginning of the structure.
-func (dec *Decoder) scanConformantArrays(s interface{}, tag reflect.StructTag) error {
-	err := dec.conformantScan(s, tag)
-	if err != nil {
-		return fmt.Errorf("failed to scan for embedded conformant arrays: %v", err)
-	}
-	for i := range dec.conformantMax {
-		dec.conformantMax[i], err = dec.readUint32()
-		if err != nil {
-			return fmt.Errorf("could not read preceding conformant max count index %d: %v", i, err)
-		}
-	}
-	return nil
-}
-
-// conformantScan inspects the structure's fields for whether they are conformant.
-func (dec *Decoder) conformantScan(s interface{}, tag reflect.StructTag) error {
-	ndrTag := parseTags(tag)
-	if ndrTag.HasValue(TagPointer) {
-		return nil
-	}
-	v := getReflectValue(s)
-	switch v.Kind() {
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			err := dec.conformantScan(v.Field(i), v.Type().Field(i).Tag)
-			if err != nil {
-				return err
-			}
-		}
-	case reflect.String:
-		if !ndrTag.HasValue(TagConformant) {
-			break
-		}
-		dec.conformantMax = append(dec.conformantMax, uint32(0))
-	case reflect.Slice:
-		if !ndrTag.HasValue(TagConformant) {
-			break
-		}
-		d, t := sliceDimensions(v.Type())
-		for i := 0; i < d; i++ {
-			dec.conformantMax = append(dec.conformantMax, uint32(0))
-		}
-		// For string arrays there is a common max for the strings within the array.
-		if t.Kind() == reflect.String {
-			dec.conformantMax = append(dec.conformantMax, uint32(0))
-		}
-	}
-	return nil
-}
-
-func (dec *Decoder) isPointer(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) (bool, error) {
-	// Pointer so defer filling the referent
-	ndrTag := parseTags(tag)
-	if ndrTag.HasValue(TagPointer) {
-		p, err := dec.readUint32()
-		if err != nil {
-			return true, fmt.Errorf("could not read pointer: %v", err)
-		}
-		ndrTag.delete(TagPointer)
-		if p != 0 {
-			// if pointer is not zero add to the deferred items at end of stream
-			*def = append(*def, deferedPtr{v, ndrTag.StructTag()})
-		}
-		return true, nil
-	}
-	return false, nil
-}
-
-func getReflectValue(s interface{}) (v reflect.Value) {
-	if r, ok := s.(reflect.Value); ok {
-		v = r
-	} else {
-		if reflect.ValueOf(s).Kind() == reflect.Ptr {
-			v = reflect.ValueOf(s).Elem()
-		}
-	}
-	return
-}
-
-// fill populates fields with values from the NDR byte stream.
-func (dec *Decoder) fill(s interface{}, tag reflect.StructTag, localDef *[]deferedPtr) error {
-	v := getReflectValue(s)
-
-	//// Pointer so defer filling the referent
-	ptr, err := dec.isPointer(v, tag, localDef)
-	if err != nil {
-		return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err)
-	}
-	if ptr {
-		return nil
-	}
-
-	// Populate the value from the byte stream
-	switch v.Kind() {
-	case reflect.Struct:
-		dec.current = append(dec.current, v.Type().Name()) //Track the current field being filled
-		// in case struct is a union, track this and the selected union field for efficiency
-		var unionTag reflect.Value
-		var unionField string // field to fill if struct is a union
-		// Go through each field in the struct and recursively fill
-		for i := 0; i < v.NumField(); i++ {
-			fieldName := v.Type().Field(i).Name
-			dec.current = append(dec.current, fieldName) //Track the current field being filled
-			//fmt.Fprintf(os.Stderr, "DEBUG Decoding: %s\n", strings.Join(dec.current, "/"))
-			structTag := v.Type().Field(i).Tag
-			ndrTag := parseTags(structTag)
-
-			// Union handling
-			if !unionTag.IsValid() {
-				// Is this field a union tag?
-				unionTag = dec.isUnion(v.Field(i), structTag)
-			} else {
-				// What is the selected field value of the union if we don't already know
-				if unionField == "" {
-					unionField, err = unionSelectedField(v, unionTag)
-					if err != nil {
-						return fmt.Errorf("could not determine selected union value field for %s with discriminat"+
-							" tag %s: %v", v.Type().Name(), unionTag, err)
-					}
-				}
-				if ndrTag.HasValue(TagUnionField) && fieldName != unionField {
-					// is a union and this field has not been selected so will skip it.
-					dec.current = dec.current[:len(dec.current)-1] //This field has been skipped so remove it from the current field tracker
-					continue
-				}
-			}
-
-			// Check if field is a pointer
-			if v.Field(i).Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) &&
-				v.Field(i).Type().Kind() == reflect.Slice && v.Field(i).Type().Elem().Kind() == reflect.Uint8 {
-				//field is for rawbytes
-				structTag, err = addSizeToTag(v, v.Field(i), structTag)
-				if err != nil {
-					return fmt.Errorf("could not get rawbytes field(%s) size: %v", strings.Join(dec.current, "/"), err)
-				}
-				ptr, err := dec.isPointer(v.Field(i), structTag, localDef)
-				if err != nil {
-					return fmt.Errorf("could not process struct field(%s): %v", strings.Join(dec.current, "/"), err)
-				}
-				if !ptr {
-					err := dec.readRawBytes(v.Field(i), structTag)
-					if err != nil {
-						return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err)
-					}
-				}
-			} else {
-				err := dec.fill(v.Field(i), structTag, localDef)
-				if err != nil {
-					return fmt.Errorf("could not fill struct field(%s): %v", strings.Join(dec.current, "/"), err)
-				}
-			}
-			dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker
-		}
-		dec.current = dec.current[:len(dec.current)-1] //This field has been filled so remove it from the current field tracker
-	case reflect.Bool:
-		i, err := dec.readBool()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Uint8:
-		i, err := dec.readUint8()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Uint16:
-		i, err := dec.readUint16()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Uint32:
-		i, err := dec.readUint32()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Uint64:
-		i, err := dec.readUint64()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Int8:
-		i, err := dec.readInt8()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Int16:
-		i, err := dec.readInt16()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Int32:
-		i, err := dec.readInt32()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Int64:
-		i, err := dec.readInt64()
-		if err != nil {
-			return fmt.Errorf("could not fill %s: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.String:
-		ndrTag := parseTags(tag)
-		conformant := ndrTag.HasValue(TagConformant)
-		// strings are always varying so this is assumed without an explicit tag
-		var s string
-		var err error
-		if conformant {
-			s, err = dec.readConformantVaryingString(localDef)
-			if err != nil {
-				return fmt.Errorf("could not fill with conformant varying string: %v", err)
-			}
-		} else {
-			s, err = dec.readVaryingString(localDef)
-			if err != nil {
-				return fmt.Errorf("could not fill with varying string: %v", err)
-			}
-		}
-		v.Set(reflect.ValueOf(s))
-	case reflect.Float32:
-		i, err := dec.readFloat32()
-		if err != nil {
-			return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Float64:
-		i, err := dec.readFloat64()
-		if err != nil {
-			return fmt.Errorf("could not fill %v: %v", v.Type().Name(), err)
-		}
-		v.Set(reflect.ValueOf(i))
-	case reflect.Array:
-		err := dec.fillFixedArray(v, tag, localDef)
-		if err != nil {
-			return err
-		}
-	case reflect.Slice:
-		if v.Type().Implements(reflect.TypeOf(new(RawBytes)).Elem()) && v.Type().Elem().Kind() == reflect.Uint8 {
-			//field is for rawbytes
-			err := dec.readRawBytes(v, tag)
-			if err != nil {
-				return fmt.Errorf("could not fill raw bytes struct field(%s): %v", strings.Join(dec.current, "/"), err)
-			}
-			break
-		}
-		ndrTag := parseTags(tag)
-		conformant := ndrTag.HasValue(TagConformant)
-		varying := ndrTag.HasValue(TagVarying)
-		if ndrTag.HasValue(TagPipe) {
-			err := dec.fillPipe(v, tag)
-			if err != nil {
-				return err
-			}
-			break
-		}
-		_, t := sliceDimensions(v.Type())
-		if t.Kind() == reflect.String && !ndrTag.HasValue(subStringArrayValue) {
-			// String array
-			err := dec.readStringsArray(v, tag, localDef)
-			if err != nil {
-				return err
-			}
-			break
-		}
-		// varying is assumed as fixed arrays use the Go array type rather than slice
-		if conformant && varying {
-			err := dec.fillConformantVaryingArray(v, tag, localDef)
-			if err != nil {
-				return err
-			}
-		} else if !conformant && varying {
-			err := dec.fillVaryingArray(v, tag, localDef)
-			if err != nil {
-				return err
-			}
-		} else {
-			//default to conformant and not varying
-			err := dec.fillConformantArray(v, tag, localDef)
-			if err != nil {
-				return err
-			}
-		}
-	default:
-		return fmt.Errorf("unsupported type")
-	}
-	return nil
-}
-
-// readBytes returns a number of bytes from the NDR byte stream.
-func (dec *Decoder) readBytes(n int) ([]byte, error) {
-	//TODO make this take an int64 as input to allow for larger values on all systems?
-	b := make([]byte, n, n)
-	m, err := dec.r.Read(b)
-	if err != nil || m != n {
-		return b, fmt.Errorf("error reading bytes from stream: %v", err)
-	}
-	return b, nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go
deleted file mode 100644
index 9971194..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/error.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package ndr
-
-import "fmt"
-
-// Malformed implements the error interface for malformed NDR encoding errors.
-type Malformed struct {
-	EText string
-}
-
-// Error implements the error interface on the Malformed struct.
-func (e Malformed) Error() string {
-	return fmt.Sprintf("malformed NDR stream: %s", e.EText)
-}
-
-// Errorf formats an error message into a malformed NDR error.
-func Errorf(format string, a ...interface{}) Malformed {
-	return Malformed{EText: fmt.Sprintf(format, a...)}
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go
deleted file mode 100644
index 1970ddb..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/header.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package ndr
-
-import (
-	"encoding/binary"
-	"fmt"
-)
-
-/*
-Serialization Version 1
-https://msdn.microsoft.com/en-us/library/cc243563.aspx
-
-Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx
-8 bytes in total:
-- First byte - Version: Must equal 1
-- Second byte -  1st 4 bits: Endianess (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC)
-- 3rd - Floating point representation (This does not seem to be the case in examples for Microsoft test sources)
-- 4th - Common Header Length: Must equal 8
-- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling.
-
-Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx
-8 bytes in total:
-- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself.
-- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling.
-*/
-
-const (
-	protocolVersion   uint8  = 1
-	commonHeaderBytes uint16 = 8
-	bigEndian                = 0
-	littleEndian             = 1
-	ascii             uint8  = 0
-	ebcdic            uint8  = 1
-	ieee              uint8  = 0
-	vax               uint8  = 1
-	cray              uint8  = 2
-	ibm               uint8  = 3
-)
-
-// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx
-type CommonHeader struct {
-	Version             uint8
-	Endianness          binary.ByteOrder
-	CharacterEncoding   uint8
-	FloatRepresentation uint8
-	HeaderLength        uint16
-	Filler              []byte
-}
-
-// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx
-type PrivateHeader struct {
-	ObjectBufferLength uint32
-	Filler             []byte
-}
-
-func (dec *Decoder) readCommonHeader() error {
-	// Version
-	vb, err := dec.r.ReadByte()
-	if err != nil {
-		return Malformed{EText: "could not read first byte of common header for version"}
-	}
-	dec.ch.Version = uint8(vb)
-	if dec.ch.Version != protocolVersion {
-		return Malformed{EText: fmt.Sprintf("byte stream does not indicate a RPC Type serialization of version %v", protocolVersion)}
-	}
-	// Read Endianness & Character Encoding
-	eb, err := dec.r.ReadByte()
-	if err != nil {
-		return Malformed{EText: "could not read second byte of common header for endianness"}
-	}
-	endian := int(eb >> 4 & 0xF)
-	if endian != 0 && endian != 1 {
-		return Malformed{EText: "common header does not indicate a valid endianness"}
-	}
-	dec.ch.CharacterEncoding = uint8(vb & 0xF)
-	if dec.ch.CharacterEncoding != 0 && dec.ch.CharacterEncoding != 1 {
-		return Malformed{EText: "common header does not indicate a valid character encoding"}
-	}
-	switch endian {
-	case littleEndian:
-		dec.ch.Endianness = binary.LittleEndian
-	case bigEndian:
-		dec.ch.Endianness = binary.BigEndian
-	}
-	// Common header length
-	lb, err := dec.readBytes(2)
-	if err != nil {
-		return Malformed{EText: fmt.Sprintf("could not read common header length: %v", err)}
-	}
-	dec.ch.HeaderLength = dec.ch.Endianness.Uint16(lb)
-	if dec.ch.HeaderLength != commonHeaderBytes {
-		return Malformed{EText: "common header does not indicate a valid length"}
-	}
-	// Filler bytes
-	dec.ch.Filler, err = dec.readBytes(4)
-	if err != nil {
-		return Malformed{EText: fmt.Sprintf("could not read common header filler: %v", err)}
-	}
-	return nil
-}
-
-func (dec *Decoder) readPrivateHeader() error {
-	// The next 8 bytes after the common header comprise the RPC type marshalling private header for constructed types.
-	err := binary.Read(dec.r, dec.ch.Endianness, &dec.ph.ObjectBufferLength)
-	if err != nil {
-		return Malformed{EText: "could not read private header object buffer length"}
-	}
-	if dec.ph.ObjectBufferLength%8 != 0 {
-		return Malformed{EText: "object buffer length not a multiple of 8"}
-	}
-	// Filler bytes
-	dec.ph.Filler, err = dec.readBytes(4)
-	if err != nil {
-		return Malformed{EText: fmt.Sprintf("could not read private header filler: %v", err)}
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go
deleted file mode 100644
index 5fd27da..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/pipe.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package ndr
-
-import (
-	"fmt"
-	"reflect"
-)
-
-func (dec *Decoder) fillPipe(v reflect.Value, tag reflect.StructTag) error {
-	s, err := dec.readUint32() // read element count of first chunk
-	if err != nil {
-		return err
-	}
-	a := reflect.MakeSlice(v.Type(), 0, 0)
-	c := reflect.MakeSlice(v.Type(), int(s), int(s))
-	for s != 0 {
-		for i := 0; i < int(s); i++ {
-			err := dec.fill(c.Index(i), tag, &[]deferedPtr{})
-			if err != nil {
-				return fmt.Errorf("could not fill element %d of pipe: %v", i, err)
-			}
-		}
-		s, err = dec.readUint32() // read element count of first chunk
-		if err != nil {
-			return err
-		}
-		a = reflect.AppendSlice(a, c)
-		c = reflect.MakeSlice(v.Type(), int(s), int(s))
-	}
-	v.Set(a)
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go
deleted file mode 100644
index 7eb1d1a..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/primitives.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package ndr
-
-import (
-	"bytes"
-	"encoding/binary"
-	"math"
-)
-
-// Byte sizes of primitive types
-const (
-	SizeBool   = 1
-	SizeChar   = 1
-	SizeUint8  = 1
-	SizeUint16 = 2
-	SizeUint32 = 4
-	SizeUint64 = 8
-	SizeEnum   = 2
-	SizeSingle = 4
-	SizeDouble = 8
-	SizePtr    = 4
-)
-
-// Bool is an NDR Boolean which is a logical quantity that assumes one of two values: TRUE or FALSE.
-// NDR represents a Boolean as one octet.
-// It represents a value of FALSE as a zero octet, an octet in which every bit is reset.
-// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set.
-
-// Char is an NDR character.
-// NDR represents a character as one octet.
-// Characters have two representation formats: ASCII and EBCDIC.
-
-// USmall is an unsigned 8 bit integer
-
-// UShort is an unsigned 16 bit integer
-
-// ULong is an unsigned 32 bit integer
-
-// UHyper is an unsigned 64 bit integer
-
-// Small is an signed 8 bit integer
-
-// Short is an signed 16 bit integer
-
-// Long is an signed 32 bit integer
-
-// Hyper is an signed 64 bit integer
-
-// Enum is the NDR representation of enumerated types as signed short integers (2 octets)
-
-// Single is an NDR defined single-precision floating-point data type
-
-// Double is an NDR defined double-precision floating-point data type
-
-// readBool reads a byte representing a boolean.
-// NDR represents a Boolean as one octet.
-// It represents a value of FALSE as a zero octet, an octet in which every bit is reset.
-// It represents a value of TRUE as a non-zero octet, an octet in which one or more bits are set.
-func (dec *Decoder) readBool() (bool, error) {
-	i, err := dec.readUint8()
-	if err != nil {
-		return false, err
-	}
-	if i != 0 {
-		return true, nil
-	}
-	return false, nil
-}
-
-// readChar reads bytes representing a 8bit ASCII integer cast to a rune.
-func (dec *Decoder) readChar() (rune, error) {
-	var r rune
-	a, err := dec.readUint8()
-	if err != nil {
-		return r, err
-	}
-	return rune(a), nil
-}
-
-// readUint8 reads bytes representing a 8bit unsigned integer.
-func (dec *Decoder) readUint8() (uint8, error) {
-	b, err := dec.r.ReadByte()
-	if err != nil {
-		return uint8(0), err
-	}
-	return uint8(b), nil
-}
-
-// readUint16 reads bytes representing a 16bit unsigned integer.
-func (dec *Decoder) readUint16() (uint16, error) {
-	dec.ensureAlignment(SizeUint16)
-	b, err := dec.readBytes(SizeUint16)
-	if err != nil {
-		return uint16(0), err
-	}
-	return dec.ch.Endianness.Uint16(b), nil
-}
-
-// readUint32 reads bytes representing a 32bit unsigned integer.
-func (dec *Decoder) readUint32() (uint32, error) {
-	dec.ensureAlignment(SizeUint32)
-	b, err := dec.readBytes(SizeUint32)
-	if err != nil {
-		return uint32(0), err
-	}
-	return dec.ch.Endianness.Uint32(b), nil
-}
-
-// readUint32 reads bytes representing a 32bit unsigned integer.
-func (dec *Decoder) readUint64() (uint64, error) {
-	dec.ensureAlignment(SizeUint64)
-	b, err := dec.readBytes(SizeUint64)
-	if err != nil {
-		return uint64(0), err
-	}
-	return dec.ch.Endianness.Uint64(b), nil
-}
-
-func (dec *Decoder) readInt8() (int8, error) {
-	dec.ensureAlignment(SizeUint8)
-	b, err := dec.readBytes(SizeUint8)
-	if err != nil {
-		return 0, err
-	}
-	var i int8
-	buf := bytes.NewReader(b)
-	err = binary.Read(buf, dec.ch.Endianness, &i)
-	if err != nil {
-		return 0, err
-	}
-	return i, nil
-}
-
-func (dec *Decoder) readInt16() (int16, error) {
-	dec.ensureAlignment(SizeUint16)
-	b, err := dec.readBytes(SizeUint16)
-	if err != nil {
-		return 0, err
-	}
-	var i int16
-	buf := bytes.NewReader(b)
-	err = binary.Read(buf, dec.ch.Endianness, &i)
-	if err != nil {
-		return 0, err
-	}
-	return i, nil
-}
-
-func (dec *Decoder) readInt32() (int32, error) {
-	dec.ensureAlignment(SizeUint32)
-	b, err := dec.readBytes(SizeUint32)
-	if err != nil {
-		return 0, err
-	}
-	var i int32
-	buf := bytes.NewReader(b)
-	err = binary.Read(buf, dec.ch.Endianness, &i)
-	if err != nil {
-		return 0, err
-	}
-	return i, nil
-}
-
-func (dec *Decoder) readInt64() (int64, error) {
-	dec.ensureAlignment(SizeUint64)
-	b, err := dec.readBytes(SizeUint64)
-	if err != nil {
-		return 0, err
-	}
-	var i int64
-	buf := bytes.NewReader(b)
-	err = binary.Read(buf, dec.ch.Endianness, &i)
-	if err != nil {
-		return 0, err
-	}
-	return i, nil
-}
-
-// https://en.wikipedia.org/wiki/IEEE_754-1985
-func (dec *Decoder) readFloat32() (f float32, err error) {
-	dec.ensureAlignment(SizeSingle)
-	b, err := dec.readBytes(SizeSingle)
-	if err != nil {
-		return
-	}
-	bits := dec.ch.Endianness.Uint32(b)
-	f = math.Float32frombits(bits)
-	return
-}
-
-func (dec *Decoder) readFloat64() (f float64, err error) {
-	dec.ensureAlignment(SizeDouble)
-	b, err := dec.readBytes(SizeDouble)
-	if err != nil {
-		return
-	}
-	bits := dec.ch.Endianness.Uint64(b)
-	f = math.Float64frombits(bits)
-	return
-}
-
-// NDR enforces NDR alignment of primitive data; that is, any primitive of size n octets is aligned at a octet stream
-// index that is a multiple of n. (In this version of NDR, n is one of {1, 2, 4, 8}.) An octet stream index indicates
-// the number of an octet in an octet stream when octets are numbered, beginning with 0, from the first octet in the
-// stream. Where necessary, an alignment gap, consisting of octets of unspecified value, precedes the representation
-// of a primitive. The gap is of the smallest size sufficient to align the primitive.
-func (dec *Decoder) ensureAlignment(n int) {
-	p := dec.size - dec.r.Buffered()
-	if s := p % n; s != 0 {
-		dec.r.Discard(n - s)
-	}
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go
deleted file mode 100644
index 9ee59fb..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/rawbytes.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package ndr
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-)
-
-// type MyBytes []byte
-// implement RawBytes interface
-
-const (
-	sizeMethod = "Size"
-)
-
-// RawBytes interface should be implemented if reading just a number of bytes from the NDR stream
-type RawBytes interface {
-	Size(interface{}) int
-}
-
-func rawBytesSize(parent reflect.Value, v reflect.Value) (int, error) {
-	sf := v.MethodByName(sizeMethod)
-	if !sf.IsValid() {
-		return 0, fmt.Errorf("could not find a method called %s on the implementation of RawBytes", sizeMethod)
-	}
-	in := []reflect.Value{parent}
-	f := sf.Call(in)
-	if f[0].Kind() != reflect.Int {
-		return 0, errors.New("the RawBytes size function did not return an integer")
-	}
-	return int(f[0].Int()), nil
-}
-
-func addSizeToTag(parent reflect.Value, v reflect.Value, tag reflect.StructTag) (reflect.StructTag, error) {
-	size, err := rawBytesSize(parent, v)
-	if err != nil {
-		return tag, err
-	}
-	ndrTag := parseTags(tag)
-	ndrTag.Map["size"] = strconv.Itoa(size)
-	return ndrTag.StructTag(), nil
-}
-
-func (dec *Decoder) readRawBytes(v reflect.Value, tag reflect.StructTag) error {
-	ndrTag := parseTags(tag)
-	sizeStr, ok := ndrTag.Map["size"]
-	if !ok {
-		return errors.New("size tag not available")
-	}
-	size, err := strconv.Atoi(sizeStr)
-	if err != nil {
-		return fmt.Errorf("size not valid: %v", err)
-	}
-	b, err := dec.readBytes(size)
-	if err != nil {
-		return err
-	}
-	v.Set(reflect.ValueOf(b).Convert(v.Type()))
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go
deleted file mode 100644
index b7a910b..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/strings.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package ndr
-
-import (
-	"fmt"
-	"reflect"
-)
-
-const (
-	subStringArrayTag   = `ndr:"varying,X-subStringArray"`
-	subStringArrayValue = "X-subStringArray"
-)
-
-func uint16SliceToString(a []uint16) string {
-	s := make([]rune, len(a), len(a))
-	for i := range s {
-		s[i] = rune(a[i])
-	}
-	if len(s) > 0 {
-		// Remove any null terminator
-		if s[len(s)-1] == rune(0) {
-			s = s[:len(s)-1]
-		}
-	}
-	return string(s)
-}
-
-func (dec *Decoder) readVaryingString(def *[]deferedPtr) (string, error) {
-	a := new([]uint16)
-	v := reflect.ValueOf(a)
-	var t reflect.StructTag
-	err := dec.fillUniDimensionalVaryingArray(v.Elem(), t, def)
-	if err != nil {
-		return "", err
-	}
-	s := uint16SliceToString(*a)
-	return s, nil
-}
-
-func (dec *Decoder) readConformantVaryingString(def *[]deferedPtr) (string, error) {
-	a := new([]uint16)
-	v := reflect.ValueOf(a)
-	var t reflect.StructTag
-	err := dec.fillUniDimensionalConformantVaryingArray(v.Elem(), t, def)
-	if err != nil {
-		return "", err
-	}
-	s := uint16SliceToString(*a)
-	return s, nil
-}
-
-func (dec *Decoder) readStringsArray(v reflect.Value, tag reflect.StructTag, def *[]deferedPtr) error {
-	d, _ := sliceDimensions(v.Type())
-	ndrTag := parseTags(tag)
-	var m []int
-	//var ms int
-	if ndrTag.HasValue(TagConformant) {
-		for i := 0; i < d; i++ {
-			m = append(m, int(dec.precedingMax()))
-		}
-		//common max size
-		_ = dec.precedingMax()
-		//ms = int(n)
-	}
-	tag = reflect.StructTag(subStringArrayTag)
-	err := dec.fillVaryingArray(v, tag, def)
-	if err != nil {
-		return fmt.Errorf("could not read string array: %v", err)
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go
deleted file mode 100644
index 01657e0..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/tags.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package ndr
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-const ndrNameSpace = "ndr"
-
-type tags struct {
-	Values []string
-	Map    map[string]string
-}
-
-// parse the struct field tags and extract the ndr related ones.
-// format of tag ndr:"value,key:value1,value2"
-func parseTags(st reflect.StructTag) tags {
-	s := st.Get(ndrNameSpace)
-	t := tags{
-		Values: []string{},
-		Map:    make(map[string]string),
-	}
-	if s != "" {
-		ndrTags := strings.Trim(s, `"`)
-		for _, tag := range strings.Split(ndrTags, ",") {
-			if strings.Contains(tag, ":") {
-				m := strings.SplitN(tag, ":", 2)
-				t.Map[m[0]] = m[1]
-			} else {
-				t.Values = append(t.Values, tag)
-			}
-		}
-	}
-	return t
-}
-
-func appendTag(t reflect.StructTag, s string) reflect.StructTag {
-	ts := t.Get(ndrNameSpace)
-	ts = fmt.Sprintf(`%s"%s,%s"`, ndrNameSpace, ts, s)
-	return reflect.StructTag(ts)
-}
-
-func (t *tags) StructTag() reflect.StructTag {
-	mv := t.Values
-	for key, val := range t.Map {
-		mv = append(mv, key+":"+val)
-	}
-	s := ndrNameSpace + ":" + `"` + strings.Join(mv, ",") + `"`
-	return reflect.StructTag(s)
-}
-
-func (t *tags) delete(s string) {
-	for i, x := range t.Values {
-		if x == s {
-			t.Values = append(t.Values[:i], t.Values[i+1:]...)
-		}
-	}
-	delete(t.Map, s)
-}
-
-func (t *tags) HasValue(s string) bool {
-	for _, v := range t.Values {
-		if v == s {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go b/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go
deleted file mode 100644
index 6a657fa..0000000
--- a/vendor/gopkg.in/jcmturner/rpc.v1/ndr/union.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package ndr
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-)
-
-// Union interface must be implemented by structs that will be unmarshaled into from the NDR byte stream union representation.
-// The union's discriminating tag will be passed to the SwitchFunc method.
-// The discriminating tag field must have the struct tag: `ndr:"unionTag"`
-// If the union is encapsulated the discriminating tag field must have the struct tag: `ndr:"encapsulated"`
-// The possible value fields that can be selected from must have the struct tag: `ndr:"unionField"`
-type Union interface {
-	SwitchFunc(t interface{}) string
-}
-
-// Union related constants such as struct tag values
-const (
-	unionSelectionFuncName = "SwitchFunc"
-	TagEncapsulated        = "encapsulated"
-	TagUnionTag            = "unionTag"
-	TagUnionField          = "unionField"
-)
-
-func (dec *Decoder) isUnion(field reflect.Value, tag reflect.StructTag) (r reflect.Value) {
-	ndrTag := parseTags(tag)
-	if !ndrTag.HasValue(TagUnionTag) {
-		return
-	}
-	r = field
-	// For a non-encapsulated union, the discriminant is marshalled into the transmitted data stream twice: once as the
-	// field or parameter, which is referenced by the switch_is construct, in the procedure argument list; and once as
-	// the first part of the union representation.
-	if !ndrTag.HasValue(TagEncapsulated) {
-		dec.r.Discard(int(r.Type().Size()))
-	}
-	return
-}
-
-// unionSelectedField returns the field name of which of the union values to fill
-func unionSelectedField(union, discriminant reflect.Value) (string, error) {
-	if !union.Type().Implements(reflect.TypeOf(new(Union)).Elem()) {
-		return "", errors.New("struct does not implement union interface")
-	}
-	args := []reflect.Value{discriminant}
-	// Call the SelectFunc of the union struct to find the name of the field to fill with the value selected.
-	sf := union.MethodByName(unionSelectionFuncName)
-	if !sf.IsValid() {
-		return "", fmt.Errorf("could not find a selection function called %s in the unions struct representation", unionSelectionFuncName)
-	}
-	f := sf.Call(args)
-	if f[0].Kind() != reflect.String || f[0].String() == "" {
-		return "", fmt.Errorf("the union select function did not return a string for the name of the field to fill")
-	}
-	return f[0].String(), nil
-}
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
index 9f55693..7348c50 100644
--- a/vendor/gopkg.in/yaml.v2/.travis.yml
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -1,12 +1,17 @@
 language: go
 
 go:
-    - 1.4
-    - 1.5
-    - 1.6
-    - 1.7
-    - 1.8
-    - 1.9
-    - tip
+    - "1.4.x"
+    - "1.5.x"
+    - "1.6.x"
+    - "1.7.x"
+    - "1.8.x"
+    - "1.9.x"
+    - "1.10.x"
+    - "1.11.x"
+    - "1.12.x"
+    - "1.13.x"
+    - "1.14.x"
+    - "tip"
 
 go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go
index 1f7e87e..acf7140 100644
--- a/vendor/gopkg.in/yaml.v2/apic.go
+++ b/vendor/gopkg.in/yaml.v2/apic.go
@@ -79,6 +79,8 @@
 	parser.encoding = encoding
 }
 
+var disableLineWrapping = false
+
 // Create a new emitter object.
 func yaml_emitter_initialize(emitter *yaml_emitter_t) {
 	*emitter = yaml_emitter_t{
@@ -87,6 +89,9 @@
 		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
 		events:     make([]yaml_event_t, 0, initial_queue_size),
 	}
+	if disableLineWrapping {
+		emitter.best_width = -1
+	}
 }
 
 // Destroy an emitter object.
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go
index 91679b5..129bc2a 100644
--- a/vendor/gopkg.in/yaml.v2/decode.go
+++ b/vendor/gopkg.in/yaml.v2/decode.go
@@ -318,12 +318,41 @@
 	return out, false, false
 }
 
+const (
+	// 400,000 decode operations is ~500kb of dense object declarations, or
+	// ~5kb of dense object declarations with 10000% alias expansion
+	alias_ratio_range_low = 400000
+
+	// 4,000,000 decode operations is ~5MB of dense object declarations, or
+	// ~4.5MB of dense object declarations with 10% alias expansion
+	alias_ratio_range_high = 4000000
+
+	// alias_ratio_range is the range over which we scale allowed alias ratios
+	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+	switch {
+	case decodeCount <= alias_ratio_range_low:
+		// allow 99% to come from alias expansion for small-to-medium documents
+		return 0.99
+	case decodeCount >= alias_ratio_range_high:
+		// allow 10% to come from alias expansion for very large documents
+		return 0.10
+	default:
+		// scale smoothly from 99% down to 10% over the range.
+		// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+		// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+		return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+	}
+}
+
 func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
 	d.decodeCount++
 	if d.aliasDepth > 0 {
 		d.aliasCount++
 	}
-	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > 0.99 {
+	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
 		failf("document contains excessive aliasing")
 	}
 	switch n.kind {
@@ -759,8 +788,7 @@
 	case mappingNode:
 		d.unmarshal(n, out)
 	case aliasNode:
-		an, ok := d.doc.anchors[n.value]
-		if ok && an.kind != mappingNode {
+		if n.alias != nil && n.alias.kind != mappingNode {
 			failWantMap()
 		}
 		d.unmarshal(n, out)
@@ -769,8 +797,7 @@
 		for i := len(n.children) - 1; i >= 0; i-- {
 			ni := n.children[i]
 			if ni.kind == aliasNode {
-				an, ok := d.doc.anchors[ni.value]
-				if ok && an.kind != mappingNode {
+				if ni.alias != nil && ni.alias.kind != mappingNode {
 					failWantMap()
 				}
 			} else if ni.kind != mappingNode {
diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod
index 1934e87..2cbb85a 100644
--- a/vendor/gopkg.in/yaml.v2/go.mod
+++ b/vendor/gopkg.in/yaml.v2/go.mod
@@ -1,5 +1,5 @@
-module "gopkg.in/yaml.v2"
+module gopkg.in/yaml.v2
 
-require (
-	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
-)
+go 1.15
+
+require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index 077fd1d..0b9bb60 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -626,30 +626,17 @@
 func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
 	// While we need more tokens to fetch, do it.
 	for {
-		// Check if we really need to fetch more tokens.
-		need_more_tokens := false
-
-		if parser.tokens_head == len(parser.tokens) {
-			// Queue is empty.
-			need_more_tokens = true
-		} else {
-			// Check if any potential simple key may occupy the head position.
-			if !yaml_parser_stale_simple_keys(parser) {
+		if parser.tokens_head != len(parser.tokens) {
+			// If queue is non-empty, check if any potential simple key may
+			// occupy the head position.
+			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+			if !ok {
+				break
+			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
 				return false
+			} else if !valid {
+				break
 			}
-
-			for i := range parser.simple_keys {
-				simple_key := &parser.simple_keys[i]
-				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
-					need_more_tokens = true
-					break
-				}
-			}
-		}
-
-		// We are finished.
-		if !need_more_tokens {
-			break
 		}
 		// Fetch the next token.
 		if !yaml_parser_fetch_next_token(parser) {
@@ -678,11 +665,6 @@
 		return false
 	}
 
-	// Remove obsolete potential simple keys.
-	if !yaml_parser_stale_simple_keys(parser) {
-		return false
-	}
-
 	// Check the indentation level against the current column.
 	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
 		return false
@@ -837,29 +819,30 @@
 		"found character that cannot start any token")
 }
 
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
-	// Check for a potential simple key for each flow level.
-	for i := range parser.simple_keys {
-		simple_key := &parser.simple_keys[i]
-
-		// The specification requires that a simple key
-		//
-		//  - is limited to a single line,
-		//  - is shorter than 1024 characters.
-		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
-			// Check if the potential simple key to be removed is required.
-			if simple_key.required {
-				return yaml_parser_set_scanner_error(parser,
-					"while scanning a simple key", simple_key.mark,
-					"could not find expected ':'")
-			}
-			simple_key.possible = false
-		}
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+	if !simple_key.possible {
+		return false, true
 	}
-	return true
+
+	// The 1.2 specification says:
+	//
+	//     "If the ? indicator is omitted, parsing needs to see past the
+	//     implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the “:” indicator must appear at most 1024
+	//     Unicode characters beyond the start of the key. In addition, the key
+	//     is restricted to a single line."
+	//
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
 }
 
 // Check if a simple key may start at the current position and add it if
@@ -879,13 +862,14 @@
 			possible:     true,
 			required:     required,
 			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
 		}
-		simple_key.mark = parser.mark
 
 		if !yaml_parser_remove_simple_key(parser) {
 			return false
 		}
 		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
 	}
 	return true
 }
@@ -900,19 +884,33 @@
 				"while scanning a simple key", parser.simple_keys[i].mark,
 				"could not find expected ':'")
 		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
 	}
-	// Remove the key from the stack.
-	parser.simple_keys[i].possible = false
 	return true
 }
 
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
 // Increase the flow level and resize the simple key list if needed.
 func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
 	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
 
 	// Increase the flow level.
 	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
 	return true
 }
 
@@ -920,11 +918,16 @@
 func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
 	if parser.flow_level > 0 {
 		parser.flow_level--
-		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
 	}
 	return true
 }
 
+// max_indents limits the indents stack size
+const max_indents = 10000
+
 // Push the current indentation level to the stack and set the new level
 // the current column is greater than the indentation level.  In this case,
 // append or insert the specified token into the token queue.
@@ -939,6 +942,11 @@
 		// indentation level.
 		parser.indents = append(parser.indents, parser.indent)
 		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
 
 		// Create a token and insert it into the queue.
 		token := yaml_token_t{
@@ -989,6 +997,8 @@
 	// Initialize the simple key stack.
 	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
 
+	parser.simple_keys_by_tok = make(map[int]int)
+
 	// A simple key is allowed at the beginning of the stream.
 	parser.simple_key_allowed = true
 
@@ -1270,7 +1280,11 @@
 	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
 
 	// Have we found a simple key?
-	if simple_key.possible {
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
 		// Create the KEY token and insert it into the queue.
 		token := yaml_token_t{
 			typ:        yaml_KEY_TOKEN,
@@ -1288,6 +1302,7 @@
 
 		// Remove the simple key.
 		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
 
 		// A simple key cannot follow another simple key.
 		parser.simple_key_allowed = false
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
index de85aa4..3081388 100644
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -89,7 +89,7 @@
 	return unmarshal(in, out, true)
 }
 
-// A Decorder reads and decodes YAML values from an input stream.
+// A Decoder reads and decodes YAML values from an input stream.
 type Decoder struct {
 	strict bool
 	parser *parser
@@ -175,7 +175,7 @@
 //                  Zero valued structs will be omitted if all their public
 //                  fields are zero, unless they implement an IsZero
 //                  method (see the IsZeroer interface type), in which
-//                  case the field will be included if that method returns true.
+//                  case the field will be excluded if IsZero returns true.
 //
 //     flow         Marshal using a flow style (useful for structs,
 //                  sequences and maps).
@@ -464,3 +464,15 @@
 	}
 	return false
 }
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to faciliate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
index e25cee5..f6a9c8e 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -579,6 +579,7 @@
 
 	simple_key_allowed bool                // May a simple key occur at the current position?
 	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
 
 	// Parser stuff
 
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 0000000..2683e4b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 0000000..08eb1ba
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+   decoded into a typed bool value. Otherwise they behave as a string. Booleans
+   in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+   as specified in YAML 1.2, because most parsers still use the old format.
+   Octals in the  _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package as it's clearly a poor choice.
+
+Apart from the exceptions noted above, the package offers backwards
+compatibility with YAML 1.1 in some cases, and supports most of YAML
+1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+        "fmt"
+        "log"
+
+        "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+        A string
+        B struct {
+                RenamedC int   `yaml:"c"`
+                D        []int `yaml:",flow"`
+        }
+}
+
+func main() {
+        t := T{}
+    
+        err := yaml.Unmarshal([]byte(data), &t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t:\n%v\n\n", t)
+    
+        d, err := yaml.Marshal(&t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t dump:\n%s\n\n", string(d))
+    
+        m := make(map[interface{}]interface{})
+    
+        err = yaml.Unmarshal([]byte(data), &m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m:\n%v\n\n", m)
+    
+        d, err = yaml.Marshal(&m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+  c: 2
+  d:
+  - 3
+  - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
new file mode 100644
index 0000000..ae7d049
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -0,0 +1,747 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// yaml_insert_token appends token to the parser's token queue, or, when
+// pos >= 0, inserts it at that offset relative to tokens_head.
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue at the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		// Plain append: the token stays at the tail of the queue.
+		return
+	}
+	// Insert: shift everything from the insertion point one slot to the
+	// right (the append above provided the extra capacity) and drop the
+	// token into the freed slot.
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	// Always succeeds; the bool result mirrors the C libyaml API.
+	return true
+}
+
+// Destroy a parser object by resetting it to the zero value.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler: copies from the input slice installed by
+// yaml_parser_set_input_string, returning io.EOF once it is exhausted.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// Reader read handler: delegates to the io.Reader installed by
+// yaml_parser_set_input_reader.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
+}
+
+// Set a string input. Panics if an input source was already set.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a file input. Panics if an input source was already set.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
+}
+
+// Set the source encoding. Panics if an encoding was already chosen.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
+
+// Create a new emitter object. Note that, unlike raw_buffer, the output
+// buffer is allocated at its full size up front; best_width starts at -1.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+		best_width: -1,
+	}
+}
+
+// Destroy an emitter object by resetting it to the zero value.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler: appends the emitted bytes to the slice installed
+// via yaml_emitter_set_output_string. Never fails.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}
+
+// Set a string output. Panics if an output target was already set.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a file output. Panics if an output target was already set.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
+}
+
+// Set the output encoding. Panics if an encoding was already chosen.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment. Values outside the 2..9 range fall back
+// to the default of 2.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width. Any negative value is normalized to -1
+// (the same sentinel yaml_emitter_initialize starts with).
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// The event initializers below each overwrite *event wholesale with a
+// freshly populated value. Some of them return bool; the result is
+// unconditionally true and only mirrors the C libyaml API.
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+}
+
+// Create ALIAS. Always returns true.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+	*event = yaml_event_t{
+		typ:    yaml_ALIAS_EVENT,
+		anchor: anchor,
+	}
+	return true
+}
+
+// Create SCALAR. Note that plain_implicit is stored in the shared
+// implicit field, while quoted_implicit has a field of its own.
+// Always returns true.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START. Always returns true.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END. Always returns true.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+}
+
+// Destroy an event object by resetting it to the zero value.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 0000000..df36e3a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,950 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser   yaml_parser_t    // low-level parser state (libyaml port)
+	event    yaml_event_t     // pending event produced by peek, consumed by expect
+	doc      *Node            // document node currently being assembled
+	anchors  map[string]*Node // anchor name -> node, used to resolve aliases
+	doneInit bool             // set once init has consumed STREAM-START
+	textless bool             // when set, node() skips line/column/comment info
+}
+
+// newParser returns a parser reading YAML from the given byte slice.
+// An empty input is replaced with a single newline so the underlying
+// parser always has at least one byte to consume.
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		// Unreachable today (yaml_parser_initialize always returns true),
+		// kept for parity with the C API.
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+// newParserFromReader returns a parser streaming YAML from r.
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+// init consumes the STREAM-START event and prepares the anchor table.
+// It runs exactly once; subsequent calls are no-ops.
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+// destroy releases the pending event, if any, and the underlying parser.
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type. Any mismatch, including an
+// unexpected STREAM-END, aborts via fail/failf.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		// Nothing peeked yet; pull the next event now.
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	// yaml_NO_EVENT marks the slot empty so the next peek/expect parses anew.
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+// The event stays pending until expect consumes it.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+// fail reports the parser's current problem via failf, prefixed with the
+// most specific line number available: the context mark if set, else the
+// problem mark. Both branches apply the same scanner-error adjustment.
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+// anchor records n under the given anchor name (when one is present) so
+// later alias events can resolve to it.
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+// parse builds and returns the node tree for the next value in the event
+// stream, dispatching on the peeked event type. It returns nil when the
+// stream is already at its end (empty input).
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	case yaml_TAIL_COMMENT_EVENT:
+		panic("internal error: unexpected tail comment event (please report)")
+	default:
+		panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+	}
+}
+
+// node creates a Node of the given kind, resolving the effective tag in
+// priority order: an explicit tag other than "!" wins (and marks the node
+// TaggedStyle), then defaultTag, then, for scalars only, the tag resolved
+// from the value itself. Position and comment data from the pending event
+// is attached unless the parser is in textless mode.
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+	var style Style
+	if tag != "" && tag != "!" {
+		tag = shortTag(tag)
+		style = TaggedStyle
+	} else if defaultTag != "" {
+		tag = defaultTag
+	} else if kind == ScalarNode {
+		tag, _ = resolve("", value)
+	}
+	n := &Node{
+		Kind:  kind,
+		Tag:   tag,
+		Value: value,
+		Style: style,
+	}
+	if !p.textless {
+		// libyaml marks are zero-based; Node positions are one-based.
+		n.Line = p.event.start_mark.line + 1
+		n.Column = p.event.start_mark.column + 1
+		n.HeadComment = string(p.event.head_comment)
+		n.LineComment = string(p.event.line_comment)
+		n.FootComment = string(p.event.foot_comment)
+	}
+	return n
+}
+
+// parseChild parses the next value and appends it to parent's Content.
+func (p *parser) parseChild(parent *Node) *Node {
+	child := p.parse()
+	parent.Content = append(parent.Content, child)
+	return child
+}
+
+func (p *parser) document() *Node {
+	n := p.node(DocumentNode, "", "", "")
+	p.doc = n
+	p.expect(yaml_DOCUMENT_START_EVENT)
+	p.parseChild(n)
+	if p.peek() == yaml_DOCUMENT_END_EVENT {
+		n.FootComment = string(p.event.foot_comment)
+	}
+	p.expect(yaml_DOCUMENT_END_EVENT)
+	return n
+}
+
// alias consumes an ALIAS event and returns an AliasNode whose Alias
// field points at the previously anchored node. Referencing an anchor
// that was never defined is a decode error.
func (p *parser) alias() *Node {
	n := p.node(AliasNode, "", "", string(p.event.anchor))
	n.Alias = p.anchors[n.Value]
	if n.Alias == nil {
		failf("unknown anchor '%s' referenced", n.Value)
	}
	p.expect(yaml_ALIAS_EVENT)
	return n
}
+
// scalar consumes a SCALAR event and returns the corresponding
// ScalarNode, translating the libyaml scalar style into the exported
// Style flags and choosing a default tag: quoted/block scalars default
// to !!str, while a plain "<<" defaults to the merge tag.
func (p *parser) scalar() *Node {
	var parsedStyle = p.event.scalar_style()
	var nodeStyle Style
	switch {
	case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
		nodeStyle = DoubleQuotedStyle
	case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
		nodeStyle = SingleQuotedStyle
	case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
		nodeStyle = LiteralStyle
	case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
		nodeStyle = FoldedStyle
	}
	var nodeValue = string(p.event.value)
	var nodeTag = string(p.event.tag)
	var defaultTag string
	if nodeStyle == 0 {
		// Plain scalar: only "<<" gets a default (merge) tag; anything
		// else has its tag resolved later from its value.
		if nodeValue == "<<" {
			defaultTag = mergeTag
		}
	} else {
		// Any quoting or block style forces a string interpretation.
		defaultTag = strTag
	}
	n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
	n.Style |= nodeStyle
	p.anchor(n, p.event.anchor)
	p.expect(yaml_SCALAR_EVENT)
	return n
}
+
// sequence consumes SEQUENCE-START ... SEQUENCE-END and returns a
// SequenceNode with one child per item. Flow style ([a, b]) is
// recorded in the node's Style, and trailing comments attached to the
// end event are kept on the sequence itself.
func (p *parser) sequence() *Node {
	n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
	if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
		n.Style |= FlowStyle
	}
	p.anchor(n, p.event.anchor)
	p.expect(yaml_SEQUENCE_START_EVENT)
	for p.peek() != yaml_SEQUENCE_END_EVENT {
		p.parseChild(n)
	}
	n.LineComment = string(p.event.line_comment)
	n.FootComment = string(p.event.foot_comment)
	p.expect(yaml_SEQUENCE_END_EVENT)
	return n
}
+
// mapping consumes MAPPING-START ... MAPPING-END and returns a
// MappingNode whose Content holds alternating key and value nodes.
// For block mappings it also relocates foot comments that the parser
// attributed to the wrong node while dedenting.
func (p *parser) mapping() *Node {
	n := p.node(MappingNode, mapTag, string(p.event.tag), "")
	block := true
	if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
		block = false
		n.Style |= FlowStyle
	}
	p.anchor(n, p.event.anchor)
	p.expect(yaml_MAPPING_START_EVENT)
	for p.peek() != yaml_MAPPING_END_EVENT {
		k := p.parseChild(n)
		if block && k.FootComment != "" {
			// Must be a foot comment for the prior value when being dedented.
			if len(n.Content) > 2 {
				n.Content[len(n.Content)-3].FootComment = k.FootComment
				k.FootComment = ""
			}
		}
		v := p.parseChild(n)
		if k.FootComment == "" && v.FootComment != "" {
			// A foot comment on the value belongs to the key/value pair.
			k.FootComment = v.FootComment
			v.FootComment = ""
		}
		if p.peek() == yaml_TAIL_COMMENT_EVENT {
			if k.FootComment == "" {
				k.FootComment = string(p.event.foot_comment)
			}
			p.expect(yaml_TAIL_COMMENT_EVENT)
		}
	}
	n.LineComment = string(p.event.line_comment)
	n.FootComment = string(p.event.foot_comment)
	if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
		// In block style a foot comment on the mapping itself belongs
		// to its last key.
		n.Content[len(n.Content)-2].FootComment = n.FootComment
		n.FootComment = ""
	}
	p.expect(yaml_MAPPING_END_EVENT)
	return n
}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
// decoder unmarshals a parsed node tree into Go values, accumulating
// type errors and guarding against pathological alias expansion.
type decoder struct {
	doc     *Node          // document currently being decoded
	aliases map[*Node]bool // alias nodes on the current expansion path (cycle detection)
	terrors []string       // accumulated type errors, reported together as a TypeError

	stringMapType  reflect.Type // target type for untyped mappings with all-string keys
	generalMapType reflect.Type // target type for untyped mappings with arbitrary keys

	knownFields bool // report mapping keys with no matching struct field as errors
	uniqueKeys  bool // report duplicate mapping keys as errors
	decodeCount int  // total decode operations, for the alias-ratio limit
	aliasCount  int  // decode operations performed inside alias expansion
	aliasDepth  int  // current alias expansion nesting depth
}
+
// Commonly needed reflection types, computed once at init.
var (
	nodeType       = reflect.TypeOf(Node{})
	durationType   = reflect.TypeOf(time.Duration(0))
	stringMapType  = reflect.TypeOf(map[string]interface{}{})
	generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
	ifaceType      = generalMapType.Elem() // plain interface{}
	timeType       = reflect.TypeOf(time.Time{})
	ptrTimeType    = reflect.TypeOf(&time.Time{})
)
+
// newDecoder returns a decoder with the default settings: unique keys
// enforced and the generic map types as targets for untyped mappings.
func newDecoder() *decoder {
	d := &decoder{
		stringMapType:  stringMapType,
		generalMapType: generalMapType,
		uniqueKeys:     true,
	}
	d.aliases = make(map[*Node]bool)
	return d
}
+
// terror records a type mismatch between node n (interpreted with tag)
// and the target type of out. Scalar values are quoted and truncated
// in the message; the error is accumulated rather than raised
// immediately so decoding can continue.
func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
	if n.Tag != "" {
		tag = n.Tag
	}
	value := n.Value
	if tag != seqTag && tag != mapTag {
		if len(value) > 10 {
			value = " `" + value[:7] + "...`"
		} else {
			value = " `" + value + "`"
		}
	}
	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
}
+
// callUnmarshaler hands node n to a value implementing Unmarshaler.
// A *TypeError returned by the callback is folded into d.terrors; any
// other error aborts decoding via fail.
func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
	err := u.UnmarshalYAML(n)
	if e, ok := err.(*TypeError); ok {
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}
+
// callObsoleteUnmarshaler supports the legacy v2-style interface whose
// callback decodes the node into an arbitrary value. Type errors
// raised while decoding inside the callback are converted into a
// *TypeError so the obsolete unmarshaler can inspect them, and merged
// back into d.terrors if it returns them unchanged.
func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
	terrlen := len(d.terrors)
	err := u.UnmarshalYAML(func(v interface{}) (err error) {
		defer handleErr(&err)
		d.unmarshal(n, reflect.ValueOf(v))
		if len(d.terrors) > terrlen {
			issues := d.terrors[terrlen:]
			d.terrors = d.terrors[:terrlen]
			return &TypeError{issues}
		}
		return nil
	})
	if e, ok := err.(*TypeError); ok {
		d.terrors = append(d.terrors, e.Errors...)
		return false
	}
	if err != nil {
		fail(err)
	}
	return true
}
+
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
	if n.ShortTag() == nullTag {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Ptr {
			if out.IsNil() {
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			// The dereferenced value may itself be a pointer or
			// implement an unmarshaler interface, so loop once more.
			again = true
		}
		if out.CanAddr() {
			outi := out.Addr().Interface()
			if u, ok := outi.(Unmarshaler); ok {
				good = d.callUnmarshaler(n, u)
				return out, true, good
			}
			if u, ok := outi.(obsoleteUnmarshaler); ok {
				good = d.callObsoleteUnmarshaler(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}
+
// fieldByIndex walks the struct field index path (as produced for
// inlined fields), allocating intermediate nil pointers along the way,
// and returns the addressed field. For a null node it returns the zero
// Value so no allocation happens for a value that won't be set.
func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
	if n.ShortTag() == nullTag {
		return reflect.Value{}
	}
	for _, num := range index {
		for {
			if v.Kind() == reflect.Ptr {
				if v.IsNil() {
					v.Set(reflect.New(v.Type().Elem()))
				}
				v = v.Elem()
				continue
			}
			break
		}
		v = v.Field(num)
	}
	return v
}
+
// Bounds for the alias-expansion guard in decoder.unmarshal: documents
// within this size range have their allowed alias ratio scaled down as
// they grow, limiting billion-laughs style amplification attacks.
const (
	// 400,000 decode operations is ~500kb of dense object declarations, or
	// ~5kb of dense object declarations with 10000% alias expansion
	alias_ratio_range_low = 400000

	// 4,000,000 decode operations is ~5MB of dense object declarations, or
	// ~4.5MB of dense object declarations with 10% alias expansion
	alias_ratio_range_high = 4000000

	// alias_ratio_range is the range over which we scale allowed alias ratios
	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)
+
// allowedAliasRatio returns the maximum fraction of decode operations
// that may originate from alias expansion for a document of the given
// size: 99% for small documents, 10% for very large ones, scaling
// linearly in between. This bounds the memory amplification available
// through maliciously aliased YAML.
func allowedAliasRatio(decodeCount int) float64 {
	// Mirror the package-level alias_ratio_range_* constants locally so
	// the scaling logic reads as a single self-contained unit.
	const (
		low  = 400000  // alias_ratio_range_low
		high = 4000000 // alias_ratio_range_high
	)
	if decodeCount <= low {
		// Allow 99% to come from alias expansion for small-to-medium documents.
		return 0.99
	}
	if decodeCount >= high {
		// Allow 10% to come from alias expansion for very large documents.
		return 0.10
	}
	// Scale smoothly from 99% down to 10% over the range. This maps to
	// 396,000 - 400,000 allowed alias-driven decodes over the range
	// (400,000 decode operations is ~100MB of allocations in worst-case
	// scenarios with single-item maps).
	progress := float64(decodeCount-low) / float64(high-low)
	return 0.99 - 0.89*progress
}
+
// unmarshal decodes node n into out, dispatching on the node kind.
// It also enforces the alias-expansion budget: once enough work has
// been done, decoding fails if too large a fraction of it originated
// from alias expansion (see allowedAliasRatio).
func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
	d.decodeCount++
	if d.aliasDepth > 0 {
		d.aliasCount++
	}
	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
		failf("document contains excessive aliasing")
	}
	if out.Type() == nodeType {
		// The caller wants the raw node itself rather than its value.
		out.Set(reflect.ValueOf(n).Elem())
		return true
	}
	switch n.Kind {
	case DocumentNode:
		return d.document(n, out)
	case AliasNode:
		return d.alias(n, out)
	}
	out, unmarshaled, good := d.prepare(n, out)
	if unmarshaled {
		return good
	}
	switch n.Kind {
	case ScalarNode:
		good = d.scalar(n, out)
	case MappingNode:
		good = d.mapping(n, out)
	case SequenceNode:
		good = d.sequence(n, out)
	case 0:
		// A zero node (e.g. from an empty document) decodes as null.
		if n.IsZero() {
			return d.null(out)
		}
		fallthrough
	default:
		failf("cannot decode node with unknown kind %d", n.Kind)
	}
	return good
}
+
// document unwraps a DocumentNode and decodes its single root node
// into out. Documents without exactly one root are rejected.
func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
	if len(n.Content) == 1 {
		d.doc = n
		d.unmarshal(n.Content[0], out)
		return true
	}
	return false
}
+
// alias decodes the node an AliasNode refers to, tracking the aliases
// on the current expansion path so that self-referential anchors are
// detected rather than recursing forever.
func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
	if d.aliases[n] {
		// TODO this could actually be allowed in some circumstances.
		failf("anchor '%s' value contains itself", n.Value)
	}
	d.aliases[n] = true
	d.aliasDepth++
	good = d.unmarshal(n.Alias, out)
	d.aliasDepth--
	delete(d.aliases, n)
	return good
}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
// null stores the zero value in out for kinds that have a natural nil
// (interface, pointer, map, slice) and reports whether it did so.
// Unaddressable targets and other kinds are left untouched.
func (d *decoder) null(out reflect.Value) bool {
	if out.CanAddr() {
		switch out.Kind() {
		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
			out.Set(reflect.Zero(out.Type()))
			return true
		}
	}
	return false
}
+
// scalar decodes a ScalarNode into out. Resolution order: a node that
// indicates a string keeps its text; otherwise the value is resolved
// via its tag (with !!binary base64-decoded). The resolved value is
// then matched against the target kind, going through
// encoding.TextUnmarshaler when the target implements it. On any
// mismatch a type error is recorded via terror.
func (d *decoder) scalar(n *Node, out reflect.Value) bool {
	var tag string
	var resolved interface{}
	if n.indicatedString() {
		tag = strTag
		resolved = n.Value
	} else {
		tag, resolved = resolve(n.Tag, n.Value)
		if tag == binaryTag {
			data, err := base64.StdEncoding.DecodeString(resolved.(string))
			if err != nil {
				failf("!!binary value contains invalid base64 data")
			}
			resolved = string(data)
		}
	}
	if resolved == nil {
		return d.null(out)
	}
	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
		// We've resolved to exactly the type we want, so use that.
		out.Set(resolvedv)
		return true
	}
	// Perhaps we can use the value as a TextUnmarshaler to
	// set its value.
	if out.CanAddr() {
		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
		if ok {
			var text []byte
			if tag == binaryTag {
				text = []byte(resolved.(string))
			} else {
				// We let any value be unmarshaled into TextUnmarshaler.
				// That might be more lax than we'd like, but the
				// TextUnmarshaler itself should bowl out any dubious values.
				text = []byte(n.Value)
			}
			err := u.UnmarshalText(text)
			if err != nil {
				fail(err)
			}
			return true
		}
	}
	switch out.Kind() {
	case reflect.String:
		if tag == binaryTag {
			out.SetString(resolved.(string))
			return true
		}
		out.SetString(n.Value)
		return true
	case reflect.Interface:
		out.Set(reflect.ValueOf(resolved))
		return true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// This used to work in v2, but it's very unfriendly.
		isDuration := out.Type() == durationType

		switch resolved := resolved.(type) {
		case int:
			if !isDuration && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case int64:
			if !isDuration && !out.OverflowInt(resolved) {
				out.SetInt(resolved)
				return true
			}
		case uint64:
			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case float64:
			if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
				out.SetInt(int64(resolved))
				return true
			}
		case string:
			// Strings decode into integer targets only via
			// time.ParseDuration, e.g. "3s" into a time.Duration.
			if out.Type() == durationType {
				d, err := time.ParseDuration(resolved)
				if err == nil {
					out.SetInt(int64(d))
					return true
				}
			}
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		switch resolved := resolved.(type) {
		case int:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case int64:
			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case uint64:
			if !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		case float64:
			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
				out.SetUint(uint64(resolved))
				return true
			}
		}
	case reflect.Bool:
		switch resolved := resolved.(type) {
		case bool:
			out.SetBool(resolved)
			return true
		case string:
			// This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
			// It only works if explicitly attempting to unmarshal into a typed bool value.
			switch resolved {
			case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
				out.SetBool(true)
				return true
			case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
				out.SetBool(false)
				return true
			}
		}
	case reflect.Float32, reflect.Float64:
		switch resolved := resolved.(type) {
		case int:
			out.SetFloat(float64(resolved))
			return true
		case int64:
			out.SetFloat(float64(resolved))
			return true
		case uint64:
			out.SetFloat(float64(resolved))
			return true
		case float64:
			out.SetFloat(resolved)
			return true
		}
	case reflect.Struct:
		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
			out.Set(resolvedv)
			return true
		}
	case reflect.Ptr:
		// Pointers are always dereferenced by prepare before reaching here.
		panic("yaml internal error: please report the issue")
	}
	d.terror(n, tag, out)
	return false
}
+
// settableValueOf returns an addressable, settable reflect.Value
// holding a copy of i. reflect.ValueOf alone yields a non-settable
// value, so the copy is placed inside a freshly allocated element.
func settableValueOf(i interface{}) reflect.Value {
	original := reflect.ValueOf(i)
	copied := reflect.New(original.Type()).Elem()
	copied.Set(original)
	return copied
}
+
// sequence decodes a SequenceNode into a slice, array, or interface
// target. Elements that fail to decode are skipped (their type errors
// having been recorded) rather than aborting the whole sequence.
func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
	l := len(n.Content)

	var iface reflect.Value
	switch out.Kind() {
	case reflect.Slice:
		out.Set(reflect.MakeSlice(out.Type(), l, l))
	case reflect.Array:
		if l != out.Len() {
			failf("invalid array: want %d elements but got %d", out.Len(), l)
		}
	case reflect.Interface:
		// No type hints. Will have to use a generic sequence.
		iface = out
		out = settableValueOf(make([]interface{}, l))
	default:
		d.terror(n, seqTag, out)
		return false
	}
	et := out.Type().Elem()

	j := 0
	for i := 0; i < l; i++ {
		e := reflect.New(et).Elem()
		if ok := d.unmarshal(n.Content[i], e); ok {
			out.Index(j).Set(e)
			j++
		}
	}
	if out.Kind() != reflect.Array {
		// Trim down to the elements that decoded successfully.
		out.Set(out.Slice(0, j))
	}
	if iface.IsValid() {
		iface.Set(out)
	}
	return true
}
+
// mapping decodes a MappingNode into a struct, map, or interface
// target. With uniqueKeys enabled, duplicate keys are reported as type
// errors before any decoding happens. Merge keys ("<<") are expanded
// through d.merge.
func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
	l := len(n.Content)
	if d.uniqueKeys {
		nerrs := len(d.terrors)
		// O(n^2) pairwise scan over the keys; mappings are normally small.
		for i := 0; i < l; i += 2 {
			ni := n.Content[i]
			for j := i + 2; j < l; j += 2 {
				nj := n.Content[j]
				if ni.Kind == nj.Kind && ni.Value == nj.Value {
					d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
				}
			}
		}
		if len(d.terrors) > nerrs {
			return false
		}
	}
	switch out.Kind() {
	case reflect.Struct:
		return d.mappingStruct(n, out)
	case reflect.Map:
		// okay
	case reflect.Interface:
		// No type hints: use map[string]interface{} when every key is a
		// plain string, map[interface{}]interface{} otherwise.
		iface := out
		if isStringMap(n) {
			out = reflect.MakeMap(d.stringMapType)
		} else {
			out = reflect.MakeMap(d.generalMapType)
		}
		iface.Set(out)
	default:
		d.terror(n, mapTag, out)
		return false
	}

	outt := out.Type()
	kt := outt.Key()
	et := outt.Elem()

	// Remember the map types so nested untyped mappings decode into
	// maps consistent with this one; restored before returning.
	stringMapType := d.stringMapType
	generalMapType := d.generalMapType
	if outt.Elem() == ifaceType {
		if outt.Key().Kind() == reflect.String {
			d.stringMapType = outt
		} else if outt.Key() == ifaceType {
			d.generalMapType = outt
		}
	}

	mapIsNew := false
	if out.IsNil() {
		out.Set(reflect.MakeMap(outt))
		mapIsNew = true
	}
	for i := 0; i < l; i += 2 {
		if isMerge(n.Content[i]) {
			d.merge(n.Content[i+1], out)
			continue
		}
		k := reflect.New(kt).Elem()
		if d.unmarshal(n.Content[i], k) {
			kkind := k.Kind()
			if kkind == reflect.Interface {
				kkind = k.Elem().Kind()
			}
			if kkind == reflect.Map || kkind == reflect.Slice {
				failf("invalid map key: %#v", k.Interface())
			}
			e := reflect.New(et).Elem()
			// Null values only overwrite entries the document itself
			// created, never pre-existing entries in a caller's map.
			if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
				out.SetMapIndex(k, e)
			}
		}
	}
	d.stringMapType = stringMapType
	d.generalMapType = generalMapType
	return true
}
+
// isStringMap reports whether n is a mapping whose keys all resolve to
// plain strings, in which case it may decode into
// map[string]interface{} rather than the general interface-keyed map.
func isStringMap(n *Node) bool {
	if n.Kind != MappingNode {
		return false
	}
	l := len(n.Content)
	for i := 0; i < l; i += 2 {
		if n.Content[i].ShortTag() != strTag {
			return false
		}
	}
	return true
}
+
// mappingStruct decodes a MappingNode into a struct, matching keys to
// fields via the cached struct info. Keys with no matching field go to
// the inline map field when one exists, or become errors when
// knownFields is set; otherwise they are silently ignored.
func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
	sinfo, err := getStructInfo(out.Type())
	if err != nil {
		panic(err)
	}

	var inlineMap reflect.Value
	var elemType reflect.Type
	if sinfo.InlineMap != -1 {
		inlineMap = out.Field(sinfo.InlineMap)
		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
		elemType = inlineMap.Type().Elem()
	}

	// Give inlined fields implementing unmarshaler interfaces a chance
	// to process the whole mapping themselves first.
	for _, index := range sinfo.InlineUnmarshalers {
		field := d.fieldByIndex(n, out, index)
		d.prepare(n, field)
	}

	var doneFields []bool
	if d.uniqueKeys {
		doneFields = make([]bool, len(sinfo.FieldsList))
	}
	name := settableValueOf("")
	l := len(n.Content)
	for i := 0; i < l; i += 2 {
		ni := n.Content[i]
		if isMerge(ni) {
			d.merge(n.Content[i+1], out)
			continue
		}
		if !d.unmarshal(ni, name) {
			continue
		}
		if info, ok := sinfo.FieldsMap[name.String()]; ok {
			if d.uniqueKeys {
				if doneFields[info.Id] {
					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
					continue
				}
				doneFields[info.Id] = true
			}
			var field reflect.Value
			if info.Inline == nil {
				field = out.Field(info.Num)
			} else {
				field = d.fieldByIndex(n, out, info.Inline)
			}
			d.unmarshal(n.Content[i+1], field)
		} else if sinfo.InlineMap != -1 {
			// Unknown key: store it in the inline map, allocated lazily.
			if inlineMap.IsNil() {
				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
			}
			value := reflect.New(elemType).Elem()
			d.unmarshal(n.Content[i+1], value)
			inlineMap.SetMapIndex(name, value)
		} else if d.knownFields {
			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
		}
	}
	return true
}
+
// failWantMap aborts decoding with the error used whenever a map merge
// ("<<") receives something other than a map or sequence of maps.
func failWantMap() {
	failf("map merge requires map or sequence of maps as the value")
}
+
// merge applies a YAML merge key ("<<") value to out. The value may be
// a mapping, an alias to a mapping, or a sequence of those; anything
// else fails via failWantMap. Sequence entries are merged in reverse
// so that earlier entries take precedence, per the YAML merge spec.
func (d *decoder) merge(n *Node, out reflect.Value) {
	switch n.Kind {
	case MappingNode:
		d.unmarshal(n, out)
	case AliasNode:
		if n.Alias != nil && n.Alias.Kind != MappingNode {
			failWantMap()
		}
		d.unmarshal(n, out)
	case SequenceNode:
		// Step backwards as earlier nodes take precedence.
		for i := len(n.Content) - 1; i >= 0; i-- {
			ni := n.Content[i]
			if ni.Kind == AliasNode {
				if ni.Alias != nil && ni.Alias.Kind != MappingNode {
					failWantMap()
				}
			} else if ni.Kind != MappingNode {
				failWantMap()
			}
			d.unmarshal(ni, out)
		}
	default:
		failWantMap()
	}
}
+
// isMerge reports whether n is a merge key ("<<"), either untagged or
// explicitly tagged with the merge tag.
func isMerge(n *Node) bool {
	return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
}
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 0000000..0f47c9c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,2020 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
// Flush the buffer if needed, keeping at least 5 spare bytes free so a
// maximal UTF-8 character plus a line break always fits.
func flush(emitter *yaml_emitter_t) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) {
		return yaml_emitter_flush(emitter)
	}
	return true
}
+
// Put a character to the output buffer, flushing first when fewer than
// 5 spare bytes remain.
func put(emitter *yaml_emitter_t, value byte) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.buffer[emitter.buffer_pos] = value
	emitter.buffer_pos++
	emitter.column++
	return true
}
+
// Put a line break to the output buffer using the emitter's configured
// break style, resetting column/indentation tracking for the new line.
func put_break(emitter *yaml_emitter_t) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
		return false
	}
	switch emitter.line_break {
	case yaml_CR_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\r'
		emitter.buffer_pos += 1
	case yaml_LN_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\n'
		emitter.buffer_pos += 1
	case yaml_CRLN_BREAK:
		emitter.buffer[emitter.buffer_pos+0] = '\r'
		emitter.buffer[emitter.buffer_pos+1] = '\n'
		emitter.buffer_pos += 2
	default:
		panic("unknown line break setting")
	}
	if emitter.column == 0 {
		// Breaking at column 0 means the line just ended was empty.
		emitter.space_above = true
	}
	emitter.column = 0
	emitter.line++
	// [Go] Do this here and below and drop from everywhere else (see commented lines).
	emitter.indention = true
	return true
}
+
// Copy a character from a string into buffer, advancing *i past the
// full UTF-8 encoding of the character (1-4 bytes).
func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
		return false
	}
	p := emitter.buffer_pos
	w := width(s[*i])
	switch w {
	// Fallthrough copies the remaining lower bytes of the encoding.
	case 4:
		emitter.buffer[p+3] = s[*i+3]
		fallthrough
	case 3:
		emitter.buffer[p+2] = s[*i+2]
		fallthrough
	case 2:
		emitter.buffer[p+1] = s[*i+1]
		fallthrough
	case 1:
		emitter.buffer[p+0] = s[*i+0]
	default:
		panic("unknown character width")
	}
	// One column per character, regardless of its byte width.
	emitter.column++
	emitter.buffer_pos += w
	*i += w
	return true
}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
// Copy a line break character from a string into buffer. A '\n' is
// normalized through put_break; any other break character is copied
// verbatim while still updating the line/column bookkeeping.
func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
	if s[*i] == '\n' {
		if !put_break(emitter) {
			return false
		}
		*i++
	} else {
		if !write(emitter, s, i) {
			return false
		}
		if emitter.column == 0 {
			emitter.space_above = true
		}
		emitter.column = 0
		emitter.line++
		// [Go] Do this here and above and drop from everywhere else (see commented lines).
		emitter.indention = true
	}
	return true
}
+
// Set an emitter error and return false, so callers can propagate the
// failure directly with `return yaml_emitter_set_emitter_error(...)`.
func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.error = yaml_EMITTER_ERROR
	emitter.problem = problem
	return false
}
+
// Emit an event: queue it, then process every queued event for which
// enough lookahead has accumulated (see yaml_emitter_need_more_events).
func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.events = append(emitter.events, *event)
	for !yaml_emitter_need_more_events(emitter) {
		event := &emitter.events[emitter.events_head]
		if !yaml_emitter_analyze_event(emitter, event) {
			return false
		}
		if !yaml_emitter_state_machine(emitter, event) {
			return false
		}
		yaml_event_delete(event)
		emitter.events_head++
	}
	return true
}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+		break
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+		break
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+		break
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
+
// Append a directive to the directives stack, rejecting a duplicate of
// an existing handle unless allow_duplicates is set. The directive is
// deep-copied before being stored.
func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
	for i := 0; i < len(emitter.tag_directives); i++ {
		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
			if allow_duplicates {
				return true
			}
			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
		}
	}

	// [Go] Do we actually need to copy this given garbage collection
	// and the lack of deallocating destructors?
	tag_copy := yaml_tag_directive_t{
		handle: make([]byte, len(value.handle)),
		prefix: make([]byte, len(value.prefix)),
	}
	copy(tag_copy.handle, value.handle)
	copy(tag_copy.prefix, value.prefix)
	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
	return true
}
+
// Increase the indentation level, pushing the previous indent onto the
// indents stack. From the initial state (-1), flow context starts at
// best_indent and block context at column 0; otherwise the indent
// grows unless indentless mode is requested.
func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
	emitter.indents = append(emitter.indents, emitter.indent)
	if emitter.indent < 0 {
		if flow {
			emitter.indent = emitter.best_indent
		} else {
			emitter.indent = 0
		}
	} else if !indentless {
		// [Go] This was changed so that indentations are more regular.
		if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
			// The first indent inside a sequence will just skip the "- " indicator.
			emitter.indent += 2
		} else {
			// Everything else aligns to the chosen indentation.
			emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
		}
	}
	return true
}
+
// State dispatcher: routes the event to the handler for the emitter's
// current state. Falling through the empty default arm to the panic
// below means the state value is corrupt — a programming error.
func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	switch emitter.state {
	default:
	case yaml_EMIT_STREAM_START_STATE:
		return yaml_emitter_emit_stream_start(emitter, event)

	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, true)

	case yaml_EMIT_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, false)

	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
		return yaml_emitter_emit_document_content(emitter, event)

	case yaml_EMIT_DOCUMENT_END_STATE:
		return yaml_emitter_emit_document_end(emitter, event)

	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)

	case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)

	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)

	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)

	case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)

	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)

	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)

	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)

	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, true)

	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, false)

	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, true)

	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, false)

	case yaml_EMIT_END_STATE:
		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
	}
	panic("invalid emitter state")
}
+
// Expect STREAM-START. Applies the stream encoding and clamps the
// formatting knobs (indent 2-9, a sensible line width), then resets
// position bookkeeping and moves to the first-document state.
func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.typ != yaml_STREAM_START_EVENT {
		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
	}
	if emitter.encoding == yaml_ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == yaml_ANY_ENCODING {
			emitter.encoding = yaml_UTF8_ENCODING
		}
	}
	if emitter.best_indent < 2 || emitter.best_indent > 9 {
		emitter.best_indent = 2
	}
	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		// A negative width means effectively unlimited.
		emitter.best_width = 1<<31 - 1
	}
	if emitter.line_break == yaml_ANY_BREAK {
		emitter.line_break = yaml_LN_BREAK
	}

	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true
	emitter.space_above = true
	emitter.foot_indent = -1

	if emitter.encoding != yaml_UTF8_ENCODING {
		if !yaml_emitter_write_bom(emitter) {
			return false
		}
	}
	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
	return true
}
+
// Expect DOCUMENT-START or STREAM-END. For a document start it writes
// any %YAML/%TAG directives, decides whether the "---" marker can be
// left implicit, and emits a pending head comment; for a stream end it
// closes any open document and flushes the output.
func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {

	if event.typ == yaml_DOCUMENT_START_EVENT {

		if event.version_directive != nil {
			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
				return false
			}
		}

		// Validate and record the document's own tag directives.
		for i := 0; i < len(event.tag_directives); i++ {
			tag_directive := &event.tag_directives[i]
			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
				return false
			}
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
				return false
			}
		}

		// The default "!" and "!!" directives are always available.
		for i := 0; i < len(default_tag_directives); i++ {
			tag_directive := &default_tag_directives[i]
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
				return false
			}
		}

		// "---" may only be omitted for the first, non-canonical document.
		implicit := event.implicit
		if !first || emitter.canonical {
			implicit = false
		}

		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
			// Close the previous open-ended document before directives.
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if event.version_directive != nil {
			implicit = false
			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}

		if len(event.tag_directives) > 0 {
			implicit = false
			for i := 0; i < len(event.tag_directives); i++ {
				tag_directive := &event.tag_directives[i]
				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
					return false
				}
				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
					return false
				}
				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
					return false
				}
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		if yaml_emitter_check_empty_document(emitter) {
			implicit = false
		}
		if !implicit {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
				return false
			}
			// NOTE(review): "|| true" makes this branch unconditional;
			// presumably an intentional upstream change to always break
			// after "---" — confirm against upstream before altering.
			if emitter.canonical || true {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}

		if len(emitter.head_comment) > 0 {
			if !yaml_emitter_process_head_comment(emitter) {
				return false
			}
			if !put_break(emitter) {
				return false
			}
		}

		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
		return true
	}

	if event.typ == yaml_STREAM_END_EVENT {
		if emitter.open_ended {
			// Close a still-open document before ending the stream.
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_flush(emitter) {
			return false
		}
		emitter.state = yaml_EMIT_END_STATE
		return true
	}

	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
}
+
+// Expect the root node.
+//
+// Pushes the DOCUMENT-END state, then emits the root node together with the
+// comments attached to it (head before, line/foot after).
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect DOCUMENT-END.
+//
+// Writes the pending foot comment (with forced separation), the "..."
+// marker when the end is explicit, flushes the output, and resets the tag
+// directives before returning to the DOCUMENT-START state.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	// [Go] Force document foot separation.
+	emitter.foot_indent = 0
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	emitter.foot_indent = -1
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	// Tag directives are scoped to a single document; drop them here.
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+//
+// first is true for the first item after "[".  trail is true when the
+// previous item already wrote its trailing "," (via the TRAIL_ITEM state,
+// see below), so no separator is needed before this item.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		// Canonical output writes a trailing comma before "]".
+		if emitter.canonical && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		// Pop the indent and leave the flow context.
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.column == 0 || emitter.canonical && !first {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
+		// Resume whatever state was active before the sequence started.
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first && !trail {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	// Break the line in canonical mode or past the preferred width.
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	// Pending comments must follow the separating comma, so route through
+	// the TRAIL_ITEM state and write "," eagerly right after the node.
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a flow key node.
+//
+// first is true for the first key after "{"; trail is true when the
+// previous entry already wrote its trailing "," (via the TRAIL_KEY state in
+// yaml_emitter_emit_flow_mapping_value), so no separator is needed here.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		// A trailing comma is needed before "}" in canonical mode or when
+		// comments still have to be rendered inside the braces.
+		if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+		}
+		if !yaml_emitter_process_head_comment(emitter) {
+			return false
+		}
+		// Pop the indent and leave the flow context.
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		if !yaml_emitter_process_line_comment(emitter) {
+			return false
+		}
+		if !yaml_emitter_process_foot_comment(emitter) {
+			return false
+		}
+		// Resume whatever state was active before the mapping started.
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first && !trail {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+
+	if emitter.column == 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	// Break the line in canonical mode or past the preferred width.
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	// Short, single-line keys avoid the explicit "?" indicator.
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+//
+// simple is true when the key was emitted without the explicit "?"
+// indicator, in which case ":" follows the key directly.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	// Pending comments must follow the separating comma, so route through
+	// the TRAIL_KEY state and write "," eagerly right after the node.
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+	} else {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	}
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block item node.
+//
+// Writes "- " at the current indent and emits the item; SEQUENCE-END pops
+// the indent and the saved state.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		// Pop the indent and resume the state saved before the sequence.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// Expect a block key node.
+//
+// Emits the key either as a simple key ("key:") or, when it cannot be a
+// simple key, with the explicit "?" indicator.  MAPPING-END pops the indent
+// and the saved state.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if !yaml_emitter_process_head_comment(emitter) {
+		return false
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		// Pop the indent and resume the state saved before the mapping.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if len(emitter.line_comment) > 0 {
+		// [Go] A line comment was provided for the key. That's unusual as the
+		//      scanner associates line comments with the value. Either way,
+		//      save the line comment and render it appropriately later.
+		emitter.key_line_comment = emitter.line_comment
+		emitter.line_comment = nil
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+//
+// simple is true when the key was a simple key, so ":" attaches directly to
+// it; otherwise ":" goes on its own indented line.  Also resolves where a
+// line comment saved from the key (key_line_comment) should be rendered.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	if len(emitter.key_line_comment) > 0 {
+		// [Go] Line comments are generally associated with the value, but when there's
+		//      no value on the same line as a mapping key they end up attached to the
+		//      key itself.
+		if event.typ == yaml_SCALAR_EVENT {
+			if len(emitter.line_comment) == 0 {
+				// A scalar is coming and it has no line comments by itself yet,
+				// so just let it handle the line comment as usual. If it has a
+				// line comment, we can't have both so the one from the key is lost.
+				emitter.line_comment = emitter.key_line_comment
+				emitter.key_line_comment = nil
+			}
+		} else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+			// An indented block follows, so write the comment right now.
+			// The two fields are swapped back afterwards so only the key's
+			// comment is consumed here.
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+			if !yaml_emitter_process_line_comment(emitter) {
+				return false
+			}
+			emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_foot_comment(emitter) {
+		return false
+	}
+	return true
+}
+
+// yaml_emitter_silent_nil_event reports whether the event is an implicit
+// empty scalar, which can be omitted from the output entirely (never in
+// canonical mode).
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+//
+// Records the emission context flags (root/sequence/mapping/simple-key) on
+// the emitter and dispatches on the event type.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+	}
+}
+
+// Expect ALIAS.
+//
+// An alias is just the "*anchor" reference; emitting it pops back to the
+// previously saved state.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+//
+// Chooses a scalar style, writes anchor and tag, then the scalar content.
+// The indent is bumped only for the duration of the scalar and popped
+// immediately afterwards, along with the saved state.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+//
+// Writes anchor and tag, then picks flow style when already inside a flow
+// context, in canonical mode, when the event requests flow style, or when
+// the sequence is empty; block style otherwise.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+//
+// Writes anchor and tag, then picks flow style when already inside a flow
+// context, in canonical mode, when the event requests flow style, or when
+// the mapping is empty; block style otherwise.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+// Always false in this port (see the upstream [Go] note); the check only
+// matters for canonical-style empty documents.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+// Peeks at the next two queued (not yet emitted) events.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+// Peeks at the next two queued (not yet emitted) events.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+//
+// A simple key must be a single-line alias, scalar, or empty collection
+// whose rendered length (anchor + tag + value) does not exceed 128 bytes.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		// Multi-line scalars can never be simple keys.
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		// Only an empty sequence ("[]") qualifies.
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		// Only an empty mapping ("{}") qualifies.
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+//
+// Starts from the event's requested style and progressively downgrades:
+// plain -> single-quoted -> double-quoted, and literal/folded ->
+// double-quoted, whenever the analyzed scalar data disallows the current
+// choice.  The result is stored in emitter.scalar_data.style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	// Canonical output always double-quotes scalars.
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	// A multi-line scalar used as a simple key must be quoted.
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		// An empty plain scalar is ambiguous in flow/simple-key context.
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	// A non-plain scalar with no tag needs the "!" non-specific tag so the
+	// parser does not resolve it as a different type.
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+//
+// Writes "&anchor" for an anchor definition or "*anchor" for an alias
+// reference; a nil anchor is a no-op.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+//
+// Writes either the shorthand form "handle" + "suffix" (when a %TAG handle
+// matched) or the verbatim form "!<suffix>".  No tag data is a no-op.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+//
+// Dispatches on the style chosen by yaml_emitter_select_scalar_style.
+// Plain/quoted styles also pass !simple_key_context to allow line breaks
+// inside the scalar.  The panic is unreachable after style selection.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+//
+// First drains any pending tail comment (from the previous node), marking
+// its foot indent, then writes the head comment itself.  Both buffers are
+// cleared after writing.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		// Remember where the comment ended so write_indent can add the
+		// separating break.
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+//
+// Writes the pending line comment on the current line, preceded by a space
+// unless the output already ends in whitespace, then clears the buffer.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+//
+// Writes the pending foot comment on its own line, clears the buffer, and
+// records the foot indent so write_indent adds a separating break next.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
+// Only version 1.1 is accepted by this emitter.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+//
+// The handle must be non-empty, start and end with '!', and contain only
+// alphanumeric characters in between; the prefix must be non-empty.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	// Step by UTF-8 rune width, not byte by byte.
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
+// Check if an anchor is valid.
+//
+// The anchor must be non-empty and alphanumeric; on success the anchor and
+// the alias flag are stored in emitter.anchor_data for process_anchor.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	// Step by UTF-8 rune width, not byte by byte.
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+//
+// Splits the tag into handle + suffix using the first registered %TAG
+// directive whose prefix matches; otherwise the whole tag becomes the
+// suffix (emitted verbatim as "!<tag>" by process_tag).
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+//
+// Scans the scalar once, rune by rune, collecting the character-class
+// facts (indicators, breaks, special characters, leading/trailing/adjacent
+// whitespace) that determine which styles are permitted, then derives the
+// allowed-style flags in emitter.scalar_data.  Always returns true.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+		tab_characters     = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	// Empty scalars: only block-plain and single-quoted are allowed.
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	// A scalar starting with "---" or "..." would look like a document
+	// marker if emitted plain.
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		// Indicator characters are only dangerous in certain positions.
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		// Non-printable (or non-ASCII without unicode output) characters
+		// force double quoting with escapes.
+		if value[i] == '\t' {
+			tab_characters = true
+		} else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	// Derive the allowed-style flags from the collected facts.
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || tab_characters || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
+
+// Check if the event data is valid.
+//
+// Resets the cached anchor/tag/scalar analysis, captures the event's
+// comments, and re-analyzes anchor, tag, and scalar value as applicable for
+// the event type.  A tag is only analyzed when it will actually be emitted
+// (canonical mode or non-implicit).
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	if len(event.head_comment) > 0 {
+		emitter.head_comment = event.head_comment
+	}
+	if len(event.line_comment) > 0 {
+		emitter.line_comment = event.line_comment
+	}
+	if len(event.foot_comment) > 0 {
+		emitter.foot_comment = event.foot_comment
+	}
+	if len(event.tail_comment) > 0 {
+		emitter.tail_comment = event.tail_comment
+	}
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
+//
+// Flushes the output buffer if needed, then writes the UTF-8 byte order
+// mark (EF BB BF) directly into the buffer.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
+// yaml_emitter_write_indent emits a line break when the cursor is past (or
+// awkwardly at) the current indentation level, then pads with spaces up to
+// that level, leaving the emitter in a "whitespace" state.
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	// An extra break separates a foot comment at this indent from the
+	// content that follows it.
+	if emitter.foot_indent == indent {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	//emitter.indention = true
+	emitter.space_above = false
+	emitter.foot_indent = -1
+	return true
+}
+
+// yaml_emitter_write_indicator writes an indicator token (e.g. "-", "?",
+// ":", "'", quotes or brackets), inserting a leading space first when one
+// is required and the emitter is not already at whitespace.
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+// yaml_emitter_write_anchor writes an anchor or alias name verbatim and
+// marks the emitter as no longer at whitespace/indentation.
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_tag_handle writes a tag handle (e.g. "!!"), adding a
+// separating space first if the emitter is not at whitespace.
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_tag_content writes a tag's content, percent-escaping
+// every byte that is neither alphanumeric nor in the listed set of URI
+// characters that may appear verbatim.
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
+			// Escape each byte of the (possibly multi-byte) character as
+			// '%' followed by two uppercase hex digits.
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				// High nibble.
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				// Low nibble.
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_plain_scalar writes a scalar with no quoting, folding
+// the line at single spaces once past best_width when allow_breaks is true.
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if len(value) > 0 && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			// Fold at a lone space when the line has grown past best_width.
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			// The first '\n' of a run gets an extra break so the folded
+			// output preserves it.
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	if len(value) > 0 {
+		emitter.whitespace = false
+	}
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+// yaml_emitter_write_single_quoted_scalar writes a scalar wrapped in single
+// quotes, doubling any embedded quote and optionally folding long lines.
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			// Fold at an interior lone space once past best_width; never at
+			// the first or last character of the value.
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			// Single quotes are escaped by doubling them.
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_double_quoted_scalar writes a scalar wrapped in double
+// quotes, backslash-escaping non-printable, non-ASCII (when unicode output
+// is off), BOM, break, quote and backslash characters, and optionally
+// folding long lines at spaces.
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
+			// Decode the UTF-8 sequence at i into the rune v of width w.
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			// Use a named escape when one exists; otherwise fall through to
+			// a numeric \xXX, \uXXXX or \UXXXXXXXX escape.
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				// Emit w uppercase hex digits, most significant first.
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			// Fold at an interior space once past best_width; when the next
+			// character is also a space, a trailing '\' keeps it literal.
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+// yaml_emitter_write_block_scalar_hints writes the indentation and chomping
+// indicators that follow a '|' or '>' block scalar introducer.
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	// An explicit indentation hint is required when the content begins with
+	// whitespace or a line break.
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
+	// Pick the chomping hint: '-' (strip) when there is no trailing break,
+	// '+' (keep) when the content is all breaks or ends in more than one,
+	// and none (clip) otherwise. Scan backwards over UTF-8 continuation
+	// bytes (0b10xxxxxx) to land on character starts.
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// yaml_emitter_write_literal_scalar writes a '|' literal block scalar:
+// indicator, hints, optional line comment, then the content with each line
+// indented and line breaks preserved verbatim.
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+	//emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+		} else {
+			// Re-indent after each break before writing content.
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+// yaml_emitter_write_folded_scalar writes a '>' folded block scalar:
+// indicator, hints, optional line comment, then the content with long lines
+// folded at spaces and breaks handled per folding rules.
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !yaml_emitter_process_line_comment(emitter) {
+		return false
+	}
+
+	//emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			// An extra break preserves a '\n' through folding, but only
+			// when the run of breaks is followed by non-blank content.
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			// Fold at a lone space once the line exceeds best_width.
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}
+
+// yaml_emitter_write_comment writes a comment, prefixing each output line
+// with "# " unless the line already starts with '#', and terminating the
+// comment with a break if the content did not end in one.
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+	breaks := false
+	pound := false
+	for i := 0; i < len(comment); {
+		if is_break(comment, i) {
+			if !write_break(emitter, comment, &i) {
+				return false
+			}
+			//emitter.indention = true
+			breaks = true
+			pound = false
+		} else {
+			if breaks && !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			// Emit "# " once per line unless the line brings its own '#'.
+			if !pound {
+				if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+					return false
+				}
+				pound = true
+			}
+			if !write(emitter, comment, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	if !breaks && !put_break(emitter) {
+		return false
+	}
+
+	emitter.whitespace = true
+	//emitter.indention = true
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 0000000..de9e72a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// encoder wraps a libyaml-style emitter and tracks state while marshalling
+// Go values into a stream of YAML events.
+type encoder struct {
+	emitter  yaml_emitter_t // low-level emitter state
+	event    yaml_event_t   // scratch event handed to yaml_emitter_emit
+	out      []byte         // output buffer when emitting to a string
+	flow     bool           // emit the next collection in flow style
+	indent   int            // indent width; init defaults it to 4 when zero
+	doneInit bool           // stream-start already emitted by init
+}
+
+// newEncoder returns an encoder that accumulates its UTF-8 output in e.out.
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+// newEncoderWithWriter returns an encoder that streams its UTF-8 output to w.
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+// init emits the stream-start event exactly once, defaulting the indent
+// width to 4 when the caller did not set one.
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	if e.indent == 0 {
+		e.indent = 4
+	}
+	e.emitter.best_indent = e.indent
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+	e.emit()
+	e.doneInit = true
+}
+
+// finish emits the stream-end event, closing the YAML stream.
+func (e *encoder) finish() {
+	e.emitter.open_ended = false
+	yaml_stream_end_event_initialize(&e.event)
+	e.emit()
+}
+
+// destroy releases the emitter's resources.
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+// emit hands the pending e.event to the emitter, aborting via must on error.
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+// must aborts encoding via failf when an emitter call reports failure,
+// using the emitter's recorded problem string as the message.
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+// marshalDoc encodes in as a complete YAML document. A *Node that is itself
+// a DocumentNode is emitted directly; any other value is wrapped between
+// implicit document-start and document-end events.
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	var node *Node
+	if in.IsValid() {
+		node, _ = in.Interface().(*Node)
+	}
+	if node != nil && node.Kind == DocumentNode {
+		e.nodev(in)
+	} else {
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.emit()
+		e.marshal(tag, in)
+		yaml_document_end_event_initialize(&e.event, true)
+		e.emit()
+	}
+}
+
+// marshal encodes a single value. It first dispatches on special concrete
+// types and interfaces (Node, time values, Marshaler, TextMarshaler), then
+// falls back to a switch on the reflect.Kind. Invalid values and nil
+// pointers encode as null; unsupported kinds panic.
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	tag = shortTag(tag)
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	switch value := iface.(type) {
+	case *Node:
+		e.nodev(in)
+		return
+	case Node:
+		// node expects an addressable *Node; copy if needed to take &in.
+		if !in.CanAddr() {
+			var n = reflect.New(in.Type()).Elem()
+			n.Set(in)
+			in = n
+		}
+		e.nodev(in.Addr())
+		return
+	case time.Time:
+		e.timev(tag, in)
+		return
+	case *time.Time:
+		e.timev(tag, in.Elem())
+		return
+	case time.Duration:
+		e.stringv(tag, reflect.ValueOf(value.String()))
+		return
+	case Marshaler:
+		v, err := value.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		e.marshal(tag, reflect.ValueOf(v))
+		return
+	case encoding.TextMarshaler:
+		text, err := value.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		e.marshal(tag, in.Elem())
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		e.marshal(tag, in.Elem())
+	case reflect.Struct:
+		e.structv(tag, in)
+	case reflect.Slice, reflect.Array:
+		e.slicev(tag, in)
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		e.intv(tag, in)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
+// mapv encodes a Go map as a YAML mapping with its keys in sorted order
+// (via keyList) so output is deterministic.
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+// fieldByIndex resolves a (possibly inlined) struct field by its index
+// path, dereferencing intermediate pointers; it returns the zero Value if
+// any pointer along the path is nil.
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+// structv encodes a struct as a YAML mapping: regular and inlined fields
+// first (honoring omitempty and flow options), then any inline map field
+// with sorted keys, panicking on a key that collides with a struct field.
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				// Skip inlined fields reached through a nil pointer.
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+// mappingv emits mapping-start, runs f to emit the key/value pairs, then
+// emits mapping-end. The one-shot e.flow flag selects flow style.
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+// slicev encodes a slice or array as a YAML sequence, marshalling each
+// element in order. The one-shot e.flow flag selects flow style.
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path: must start with a sign or digit and contain a ':'.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for
+// YAML 1.1 parsing.
+func isOldBool(s string) (result bool) {
+	switch s {
+	case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+		"n", "N", "no", "No", "NO", "off", "Off", "OFF":
+		return true
+	default:
+		return false
+	}
+}
+
+// stringv encodes a string scalar, choosing a scalar style: invalid UTF-8
+// is base64-encoded under !!binary; strings that would resolve to another
+// tag (or to YAML 1.1 base-60/bool forms) are quoted; strings containing
+// newlines use literal style (or double quotes in flow context).
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+// boolv encodes a bool as the plain scalar "true" or "false".
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+// intv encodes a signed integer as a plain decimal scalar.
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+// uintv encodes an unsigned integer as a plain decimal scalar.
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+// timev encodes a time.Time as a plain RFC 3339 (nanosecond) scalar.
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+// floatv encodes a float as a plain scalar, using the precision of the
+// underlying type and YAML's ".inf"/"-.inf"/".nan" spellings.
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+// nilv encodes a nil value as the plain scalar "null".
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+// emitScalar builds and emits a scalar event with the given anchor, tag,
+// style and attached comments. An empty tag makes the scalar implicit;
+// otherwise the tag is expanded to its long form.
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlining Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+// nodev unwraps a reflect.Value holding a *Node and encodes it via node.
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+// node encodes a *Node into emitter events, dispatching on node.Kind.
+// tail is a foot comment inherited from a preceding mapping key (see the
+// MappingNode case). Tags that match the node's implicit resolution are
+// dropped from the output; a dropped !!str tag may force quoting instead.
+func (e *encoder) node(node *Node, tail string) {
+	// Zero nodes behave as nil.
+	if node.Kind == 0 && node.IsZero() {
+		e.nilv()
+		return
+	}
+
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ := resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					// Plain output would resolve to another tag, so keep
+					// the string-ness by quoting rather than tagging.
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			var rtag string
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			if foot != "" {
+				// Detach the foot comment from the key; it is re-attached
+				// as the tail of the next key (or the mapping end).
+				kopy := *k
+				kopy.FootComment = ""
+				k = &kopy
+			}
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if stag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if stag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", stag)
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+			tag = binaryTag
+			value = encodeBase64(value)
+		}
+
+		style := yaml_PLAIN_SCALAR_STYLE
+		switch {
+		case node.Style&DoubleQuotedStyle != 0:
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		case node.Style&SingleQuotedStyle != 0:
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		case node.Style&LiteralStyle != 0:
+			style = yaml_LITERAL_SCALAR_STYLE
+		case node.Style&FoldedStyle != 0:
+			style = yaml_FOLDED_SCALAR_STYLE
+		case strings.Contains(value, "\n"):
+			style = yaml_LITERAL_SCALAR_STYLE
+		case forceQuoting:
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+
+		e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+	default:
+		failf("cannot encode node with unknown kind %d", node.Kind)
+	}
+}
diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
new file mode 100644
index 0000000..f407ea3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v3"
+
+require (
+	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 0000000..ac66fcc
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1249 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+// Peek the next token in the token queue.
+//
+// Returns nil when no further token can be fetched (scanner failure or
+// exhausted input); every caller must nil-check the result. As a side
+// effect, comments queued up to the token's position are folded into the
+// parser's top-level comment buffers.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		token := &parser.tokens[parser.tokens_head]
+		yaml_parser_unfold_comments(parser, token)
+		return token
+	}
+	return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments behind the position of the provided token into the respective
+// top-level comment slices in the parser.
+//
+// Multiple queued comments of the same kind are joined with a newline.
+// Consumed queue entries are zeroed and comments_head is advanced.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+	for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+		comment := &parser.comments[parser.comments_head]
+		if len(comment.head) > 0 {
+			if token.typ == yaml_BLOCK_END_TOKEN {
+				// No heads on ends, so keep comment.head for a follow up token.
+				break
+			}
+			if len(parser.head_comment) > 0 {
+				parser.head_comment = append(parser.head_comment, '\n')
+			}
+			parser.head_comment = append(parser.head_comment, comment.head...)
+		}
+		if len(comment.foot) > 0 {
+			if len(parser.foot_comment) > 0 {
+				parser.foot_comment = append(parser.foot_comment, '\n')
+			}
+			parser.foot_comment = append(parser.foot_comment, comment.foot...)
+		}
+		if len(comment.line) > 0 {
+			if len(parser.line_comment) > 0 {
+				parser.line_comment = append(parser.line_comment, '\n')
+			}
+			parser.line_comment = append(parser.line_comment, comment.line...)
+		}
+		// Zero the consumed entry so its byte slices can be collected.
+		*comment = yaml_comment_t{}
+		parser.comments_head++
+	}
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+//
+// Also records whether the consumed token was STREAM-END, which later
+// stops yaml_parser_parse from producing further events.
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+//
+// The event object is zeroed before use. Once the stream has ended, a
+// parser error was recorded, or the parser reached its end state, this
+// returns true without producing a new event; otherwise it dispatches to
+// the state machine, which reports false on error (see parser.error).
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+//
+// Records the problem description and position on the parser and returns
+// false so callers can propagate the failure directly.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// yaml_parser_set_parser_error_context records a parser error together with
+// the surrounding context (e.g. "while parsing a block mapping") and both
+// marks. Always returns false for direct propagation by callers.
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+//
+// Maps the parser's current state to the production-specific parse
+// function that will emit the next event. An unknown state is a
+// programming error and panics.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+//
+// Emits STREAM-START and moves to the implicit-document-start state.
+// Anything other than a STREAM-START token here is a parser error.
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+//
+// Emits DOCUMENT-START for an implicit or explicit document, or
+// STREAM-END when the input is exhausted.
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		var head_comment []byte
+		if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			//      the header so the part before the last empty line goes into the
+			//      document header, while the bottom of it goes into a follow up event.
+			for i := len(parser.head_comment) - 1; i > 0; i-- {
+				if parser.head_comment[i] == '\n' {
+					if i == len(parser.head_comment)-1 {
+						head_comment = parser.head_comment[:i]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					} else if parser.head_comment[i-1] == '\n' {
+						head_comment = parser.head_comment[:i-1]
+						parser.head_comment = parser.head_comment[i+1:]
+						break
+					}
+				}
+			}
+		}
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+
+			head_comment: head_comment,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+// An immediately-following directive, document boundary, or stream end
+// means the document body is empty, so an empty scalar is produced;
+// otherwise the block node is parsed.
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// Emits DOCUMENT-END (implicit unless an explicit "..." marker token was
+// seen) and resets the per-document tag directives.
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	yaml_parser_set_event_comments(parser, event)
+	if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+		// A head comment attached to the document end is really a foot
+		// comment for the document as a whole.
+		event.foot_comment = event.head_comment
+		event.head_comment = nil
+	}
+	return true
+}
+
+// yaml_parser_set_event_comments moves the parser's accumulated head, line,
+// and foot comments onto the given event and clears all of the parser's
+// comment buffers (including tail and stem) so they are not reused.
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+	event.head_comment = parser.head_comment
+	event.line_comment = parser.line_comment
+	event.foot_comment = parser.foot_comment
+	parser.head_comment = nil
+	parser.line_comment = nil
+	parser.foot_comment = nil
+	parser.tail_comment = nil
+	parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+//
+// Parses a complete node: an alias, or optional anchor/tag properties
+// followed by a scalar or the start of a collection. block selects the
+// block context productions; indentless_sequence additionally allows a
+// "-" entry at the current indentation.
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		// An alias stands alone; pop the saved state and emit it directly.
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	// Properties may appear as ANCHOR TAG? or TAG ANCHOR?.
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	// Resolve the tag handle against the registered tag directives.
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			// Verbatim tag: the suffix is the full tag already.
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		yaml_parser_set_event_comments(parser, event)
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		yaml_parser_set_event_comments(parser, event)
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		if parser.stem_comment != nil {
+			event.head_comment = parser.stem_comment
+			parser.stem_comment = nil
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		if parser.stem_comment != nil {
+			event.head_comment = parser.stem_comment
+			parser.stem_comment = nil
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		// Properties with no following content: an empty scalar node.
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+// On the first call the BLOCK-SEQUENCE-START token is consumed and its
+// mark pushed for error reporting. Each "-" entry yields either a parsed
+// node or an empty scalar; BLOCK-END emits SEQUENCE-END.
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		prior_head_len := len(parser.head_comment)
+		skip_token(parser)
+		yaml_parser_split_stem_comment(parser, prior_head_len)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			// "-" with no node after it: produce an empty scalar entry.
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+//
+// An indentless sequence has no explicit start/end tokens, so the
+// sequence ends as soon as a token other than BLOCK-ENTRY is seen.
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		prior_head_len := len(parser.head_comment)
+		skip_token(parser)
+		yaml_parser_split_stem_comment(parser, prior_head_len)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		// "-" followed by no node content: emit an empty scalar entry.
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not the individual
+// sequence or map entry as would be expected otherwise. To handle this case the
+// previous head comment is moved aside as the stem comment.
+//
+// stem_len is the length of the head comment as it stood before the "-"
+// entry token was consumed; a zero length means there is nothing to split.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+	if stem_len == 0 {
+		return
+	}
+
+	token := peek_token(parser)
+	if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+		return
+	}
+
+	parser.stem_comment = parser.head_comment[:stem_len]
+	if len(parser.head_comment) == stem_len {
+		parser.head_comment = nil
+	} else {
+		// Copy suffix to prevent very strange bugs if someone ever appends
+		// further bytes to the prefix in the stem_comment slice above.
+		parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+	}
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+// On the first call the BLOCK-MAPPING-START token is consumed and its
+// mark pushed for error reporting. Emits the key node (or an empty
+// scalar), MAPPING-END on BLOCK-END, or a pending tail-comment event.
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// [Go] A tail comment was left from the prior mapping value processed. Emit an event
+	//      as it needs to be processed with that value and not the following key.
+	if len(parser.tail_comment) > 0 {
+		*event = yaml_event_t{
+			typ:          yaml_TAIL_COMMENT_EVENT,
+			start_mark:   token.start_mark,
+			end_mark:     token.end_mark,
+			foot_comment: parser.tail_comment,
+		}
+		parser.tail_comment = nil
+		return true
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			// "?" key with no content: the key is an empty scalar.
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		yaml_parser_set_event_comments(parser, event)
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+// A key without a ":" VALUE token, or a ":" with no node after it,
+// produces an empty scalar for the value.
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+// On the first call the FLOW-SEQUENCE-START token is consumed and its
+// mark pushed for error reporting. A KEY token inside the sequence starts
+// an implicit single-pair flow mapping.
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			// Entries after the first must be separated by a ",".
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	yaml_parser_set_event_comments(parser, event)
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+// yaml_parser_parse_flow_sequence_entry_mapping_key parses the key of an
+// implicit "key: value" pair inside a flow sequence.  If the next token
+// already terminates the key (':', ',' or ']'), the key is an empty scalar.
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, true, false)
+	}
+	// Missing key: emit an empty scalar positioned after the current token.
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+// yaml_parser_parse_flow_sequence_entry_mapping_value parses the value of an
+// implicit "key: value" pair inside a flow sequence.  A missing value (the
+// next token is ',' or ']') produces an empty scalar.
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		// [Go] Fix: assign with '=' instead of declaring with ':=' so the
+		// empty-scalar mark below refers to the token *following* the VALUE
+		// token, matching libyaml and the sibling
+		// yaml_parser_parse_flow_mapping_value.  The original declaration
+		// shadowed 'token', leaving the stale VALUE token in scope here.
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+// yaml_parser_parse_flow_sequence_entry_mapping_end emits the MAPPING-END
+// event that closes an implicit single-pair mapping inside a flow sequence.
+// The current token is not consumed; it belongs to the enclosing sequence.
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+// yaml_parser_parse_flow_mapping_key parses one key of a flow mapping
+// ("{k: v, ...}").  On the first call it records the mapping's start mark and
+// consumes FLOW-MAPPING-START; on later calls it requires a ',' separator.
+// It emits the key node, an empty scalar for a missing key, or MAPPING-END
+// when '}' is reached.
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				// Entries after the first must be separated by ','.
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				// Explicit key with content: parse it as a node.
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				// '?' immediately followed by ':'/','/'}' is an empty key.
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			// Bare node used as a key; a missing value becomes an empty scalar.
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	// '}' (or an empty mapping): pop the saved state and emit MAPPING-END.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	yaml_parser_set_event_comments(parser, event)
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+// yaml_parser_parse_flow_mapping_value parses the value of a flow mapping
+// entry.  When empty is true (FLOW_MAPPING_EMPTY_VALUE state) the value is
+// always an empty scalar; otherwise a ':' with no following node also yields
+// an empty scalar.
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event: a plain, implicit scalar with no value,
+// positioned at mark.  Used wherever the grammar allows a node to be omitted.
+// Always returns true.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+// default_tag_directives are the implicit "!" and "!!" tag handles defined by
+// the YAML spec; they are registered after any explicit %TAG directives.
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+// yaml_parser_process_directives consumes the %YAML and %TAG directive tokens
+// preceding a document, validates them, and registers the tag handles on the
+// parser.  On success the optional out-parameters receive the parsed version
+// directive (nil if absent) and the explicit tag directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			// At most one %YAML directive per document, and only
+			// version 1.1 is accepted.
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			// Duplicate handles in explicit %TAG directives are an error.
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	// Register the default "!" and "!!" handles; explicit directives above
+	// take precedence because duplicates are allowed (and ignored) here.
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.  If the handle is already
+// registered, returns true when allow_duplicates is set (used for the default
+// handles, so explicit %TAG directives win) and a parser error otherwise.
+// The directive's byte slices are deep-copied before being retained.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 0000000..b7de0a8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false (the C original returned 0); callers
+// propagate the false return up the reader call chain.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks recognized by yaml_parser_determine_encoding.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Returns true on success, false on
+// failure (a reader error was already set by yaml_parser_update_raw_buffer).
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer (a UTF-8 BOM is
+	// 3 bytes long; fewer may be available at EOF).
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding; a consumed BOM advances both the raw buffer
+	// position and the stream offset.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer: compact the unread bytes to the front, then fill the
+// remaining capacity from read_handler.  io.EOF sets parser.eof; any other
+// read error becomes a reader error.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less that the buffer size.
+// yaml_parser_update_buffer decodes characters from the raw (encoded) buffer
+// into parser.buffer as UTF-8 until at least `length` characters are unread,
+// validating UTF-8/UTF-16 sequences and the YAML allowed-character set along
+// the way.  At EOF the buffer is NUL-padded up to `length` so callers indexing
+// ahead never read past the slice (see the [Go] notes below).
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// for that to be the case, and there are tests depending on it.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length is Go) panicking; or C) accessing invalid memory.
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					// Overlong encodings are rejected.
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is Go) panicking; or C) accessing invalid memory.
+	// This happens here due to the EOF above breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 0000000..64ae888
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// resolveMapItem is a value/tag pair stored in resolveMap, used to resolve
+// plain scalars (e.g. "true", "null", ".inf") to a typed Go value and tag.
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+// resolveTable maps the first byte of a plain scalar to a resolution hint:
+// 'S' sign, 'D' digit, 'M' in resolveMap, '.' possible float; 0 means the
+// scalar can only be a plain string.
+var resolveTable = make([]byte, 256)
+// resolveMap maps literal scalar spellings to their resolved value and tag.
+var resolveMap = make(map[string]resolveMapItem)
+
+// init populates resolveTable with first-byte hints and resolveMap with the
+// literal spellings of booleans, null, NaN, the infinities, and "<<" (merge).
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, boolTag, []string{"true", "True", "TRUE"}},
+		{false, boolTag, []string{"false", "False", "FALSE"}},
+		{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", mergeTag, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+// Short forms of the standard YAML tags ("tag:yaml.org,2002:*").
+const (
+	nullTag      = "!!null"
+	boolTag      = "!!bool"
+	strTag       = "!!str"
+	intTag       = "!!int"
+	floatTag     = "!!float"
+	timestampTag = "!!timestamp"
+	seqTag       = "!!seq"
+	mapTag       = "!!map"
+	binaryTag    = "!!binary"
+	mergeTag     = "!!merge"
+)
+
+// longTags/shortTags translate between short ("!!str") and long
+// ("tag:yaml.org,2002:str") forms of the standard tags.
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+// init pre-populates both translation tables so shortTag/longTag resolve the
+// standard tags with a single map lookup.
+func init() {
+	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+		ltag := longTag(stag)
+		longTags[stag] = ltag
+		shortTags[ltag] = stag
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+// shortTag converts a long-form tag ("tag:yaml.org,2002:str") into its short
+// form ("!!str"); any tag without the long prefix is returned unchanged.
+func shortTag(tag string) string {
+	if strings.HasPrefix(tag, longTagPrefix) {
+		if stag, ok := shortTags[tag]; ok {
+			return stag
+		}
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+// longTag converts a short-form tag ("!!str") into its long form
+// ("tag:yaml.org,2002:str"); any tag without the "!!" prefix is returned
+// unchanged.
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		if ltag, ok := longTags[tag]; ok {
+			return ltag
+		}
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+// resolvableTag reports whether resolve() can infer a typed value for a
+// scalar carrying the given (short-form or empty) tag.
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+		return true
+	}
+	return false
+}
+
+// yamlStyleFloat matches YAML's plain float syntax (optional sign, digits,
+// optional fraction and exponent).
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+// resolve determines the concrete tag and Go value for a scalar `in` carrying
+// the (possibly empty) tag `tag`.  Untagged scalars are resolved by content
+// (bool/null/int/float/timestamp lookup and parsing); anything unrecognized
+// falls through to !!str.  The deferred check calls failf if the resolved tag
+// contradicts an explicitly requested one.
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	tag = shortTag(tag)
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, strTag, binaryTag:
+			return
+		case floatTag:
+			// An explicit !!float tag on an int-looking scalar is honored
+			// by converting the parsed integer to float64.
+			if rtag == intTag {
+				switch v := out.(type) {
+				case int64:
+					rtag = floatTag
+					out = float64(v)
+					return
+				case int:
+					rtag = floatTag
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != strTag && tag != binaryTag {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return floatTag, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == timestampTag {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return timestampTag, t
+				}
+			}
+
+			// Underscores are legal digit separators in YAML 1.1 numbers.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return intTag, int(intv)
+				} else {
+					return intTag, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return intTag, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return floatTag, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+				if err == nil {
+					// NOTE(review): 'true ||' makes the else branch dead code,
+					// so negative binary ints are always returned as int even
+					// where int is 32-bit. This matches upstream go-yaml;
+					// confirm upstream intent before changing.
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+			// Octals as introduced in version 1.2 of the spec.
+			// Octals from the 1.1 spec, spelled as 0777, are still
+			// decoded by default in v3 as well for compatibility.
+			// May be dropped in v4 depending on how usage evolves.
+			if strings.HasPrefix(plain, "0o") {
+				intv, err := strconv.ParseInt(plain[2:], 8, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+				if err == nil {
+					return intTag, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0o") {
+				intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+				if err == nil {
+					// NOTE(review): same dead 'true ||' branch as the -0b case
+					// above; kept to match upstream go-yaml.
+					if true || intv == int64(int(intv)) {
+						return intTag, int(intv)
+					} else {
+						return intTag, intv
+					}
+				}
+			}
+		default:
+			panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return strTag, in
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length (70 characters per line).
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	// One buffer holds both the raw encoding (in) and the line-broken copy
+	// (out); out has room for encLen bytes plus one '\n' per line.
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			// Multi-line output gets a newline after every chunk,
+			// including the last one.
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.  Reject anything whose
+	// first four characters are not digits followed by '-' before paying for
+	// time.Parse attempts.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 0000000..ca00701
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3038 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided on two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// parser transform the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or,
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward.  The issues are "block collection start" and
+// "simple keys".  Both issues are explained below in details.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tag, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrate the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly.  FLOW-ENTRY represent the ',' indicator.  Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python).  However YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require to start a new block collection from a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included into a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	// Fast path: enough unread characters are already buffered; otherwise
+	// pull more input via yaml_parser_update_buffer.
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer past one character (possibly multi-byte UTF-8,
+// hence the width() call), updating the mark and the unread count.
+func skip(parser *yaml_parser_t) {
+	// Any non-blank character resets the consecutive-newline counter.
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+// skip_line consumes a single line break, resetting the column to zero and
+// advancing the line and newline counters. It is a no-op when the current
+// character is not a break.
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		// CR LF: one logical break, but two buffered characters.
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+		parser.newlines++
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		// Single break character (may still be multi-byte, e.g. NEL/LS/PS).
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+		parser.newlines++
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+// The character may be multi-byte UTF-8; w is its encoded width in bytes.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		parser.newlines = 0
+	}
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		// Single-byte fast path: extend the slice in place rather than
+		// going through append.
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+// CR LF, CR, LF and NEL are all normalized to a single '\n' in s; LS/PS are
+// copied through verbatim as their three UTF-8 bytes. Returns s unchanged
+// when the current character is not a break.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		// The extra index++/unread-- here accounts for the second buffered
+		// character; the shared bookkeeping below covers the first.
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	// Shared bookkeeping for one consumed line break.
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	parser.newlines++
+	return s
+}
+
+// Get the next token.
+// Pops the head of the token queue into *token, refilling the queue via
+// yaml_parser_fetch_more_tokens when needed. Returns false only on a fetch
+// failure; after STREAM-END or a prior error it returns true with a
+// zeroed token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	// Remember the stream end so subsequent calls short-circuit above.
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
+// context/context_mark describe the enclosing construct being scanned;
+// problem describes the specific failure at the parser's current mark.
+// Always returns false so callers can use it as their return expression.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+// yaml_parser_set_scanner_tag_error records a scanner error for tag
+// scanning, choosing the context string by whether a %TAG directive or a
+// plain tag was being parsed. Always returns false.
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+// trace is a debugging helper: it prints args prefixed with "+++" on entry
+// and returns a closure that prints the same args prefixed with "---",
+// intended for use as `defer trace(...)()`.
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+// Keeps fetching until the queue head is unambiguous: a token at the head
+// that might still be a simple key forces another fetch so the eventual
+// KEY/plain distinction can be made. Returns false on any fetch or
+// simple-key validation error.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// [Go] The comment parsing logic requires a lookahead of two tokens
+		// so that foot comments may be parsed in time of associating them
+		// with the tokens that are parsed before them, and also for line
+		// comments to be transformed into head comments in some edge cases.
+		if parser.tokens_head < len(parser.tokens)-2 {
+			// If a potential simple key is at the head position, we need to fetch
+			// the next token to disambiguate it.
+			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+			if !ok {
+				break
+			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+				return false
+			} else if !valid {
+				break
+			}
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+// Skips whitespace/comments, unrolls indentation, then inspects the current
+// character(s) and delegates to the matching yaml_parser_fetch_* routine.
+// The named result `ok` lets the deferred line-comment scan below veto an
+// otherwise successful fetch.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	scan_mark := parser.mark
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// [Go] While unrolling indents, transform the head comments of prior
+	// indentation levels observed after scan_start into foot comments at
+	// the respective indexes.
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// For ':' (block context) and ',' (flow context), anchor any trailing
+	// line comment to the start of the previous token rather than here.
+	comment_mark := parser.mark
+	if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+		// Associate any following comments with the prior token.
+		comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+	}
+	// After a successful fetch (ok == true), scan for a line comment that
+	// follows the token just produced.
+	defer func() {
+		if !ok {
+			return
+		}
+		if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+			// Sequence indicators alone have no line comments. It becomes
+			// a head comment for whatever follows.
+			return
+		}
+		if !yaml_parser_scan_line_comment(parser, comment_mark) {
+			ok = false
+			return
+		}
+	}()
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank characters except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] TODO Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we don't determine the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+// yaml_simple_key_is_valid reports whether a recorded potential simple key
+// can still become a KEY token. valid is the answer; ok is false only when
+// an expired *required* key turns into a scanner error. An expired optional
+// key is marked impossible and reported as (false, true).
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+	if !simple_key.possible {
+		return false, true
+	}
+
+	// The 1.2 specification says:
+	//
+	//     "If the ? indicator is omitted, parsing needs to see past the
+	//     implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the “:” indicator must appear at most 1024
+	//     Unicode characters beyond the start of the key. In addition, the key
+	//     is restricted to a single line."
+	//
+	// A key expires if the scan has moved to a later line or more than
+	// 1024 characters past where the key started.
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+// The saved key records the token number it would correspond to, so that
+// yaml_parser_fetch_value can later insert a KEY token at that position.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			// Absolute token number: tokens already handed out plus those
+			// still queued.
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		// Any previously saved key at this flow level is superseded.
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+// Removing a *required* key is a scanner error (the expected ':' never
+// appeared); an optional key is simply cleared from the stack and from the
+// token-number index.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+// Each flow level gets its own (initially impossible) simple-key slot;
+// depth is capped at max_flow_level to guard against pathological input.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+// Pops the per-level simple-key slot pushed by
+// yaml_parser_increase_flow_level (and its token-number index entry).
+// No-op at level zero.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level
+// if the current column is greater than the indentation level.  In this
+// case, append or insert the specified token (BLOCK-SEQUENCE-START or
+// BLOCK-MAPPING-START) into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		// Cap indentation depth to guard against pathological input.
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		// number is an absolute token number; convert it to an index into
+		// the live queue before inserting. -1 means append at the tail.
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column.  For each indentation level, append
+// the BLOCK-END token.  scan_mark is where the current run of whitespace
+// and comments began; it anchors the backwards search that repositions
+// each BLOCK-END before foot comments belonging to the ending block.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	block_mark := scan_mark
+	block_mark.index--
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+
+		// [Go] Reposition the end token before potential following
+		//      foot comments of parent blocks. For that, search
+		//      backwards for recent comments that were at the same
+		//      indent as the block that is ending now.
+		stop_index := block_mark.index
+		for i := len(parser.comments) - 1; i >= 0; i-- {
+			comment := &parser.comments[i]
+
+			if comment.end_mark.index < stop_index {
+				// Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+				// If requested indent column is < 0, then the document is over and everything else
+				// is a foot anyway.
+				break
+			}
+			if comment.start_mark.column == parser.indent+1 {
+				// This is a good match. But maybe there's a former comment
+				// at that same indent level, so keep searching.
+				block_mark = comment.start_mark
+			}
+
+			// While the end of the former comment matches with
+			// the start of the following one, we know there's
+			// nothing in between and scanning is still safe.
+			stop_index = comment.scan_mark.index
+		}
+
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: block_mark,
+			end_mark:   block_mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+// Sets up the simple key stack and its lookup map, and marks the stream
+// as started via stream_start_produced.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Lookup table: simple key token number -> entry in simple_keys.
+	parser.simple_keys_by_tok = make(map[int]int)
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+// Forces the mark onto a fresh line, unrolls every remaining indentation
+// level (emitting BLOCK-END tokens), and clears any pending simple keys.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+// Directives only appear at column 0, so all indentation is unrolled and
+// pending simple keys are discarded before scanning the directive itself.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token, as selected by typ.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	// The indicator is three characters long ('---' or '...').
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token, as
+// selected by typ, for the '[' or '{' indicator.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token, as selected
+// by typ, for the ']' or '}' indicator.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token for the ',' indicator.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token for the '-' indicator.
+// In the block context this may also emit a BLOCK-SEQUENCE-START token
+// via yaml_parser_roll_indent when a new sequence begins here.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token for the explicit '?' indicator.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token for the ':' indicator.
+// If a pending simple key is valid, a KEY token is retroactively inserted
+// at the position recorded for that key before the VALUE token is emitted.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token, as selected by typ
+// (yaml_ALIAS_TOKEN or yaml_ANCHOR_TOKEN).
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token, delegating the actual scanning to
+// yaml_parser_scan_tag.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens;
+// literal selects the literal form, otherwise the folded form is scanned.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted)
+// tokens; single selects the single-quoted form.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+// Also swallows a BOM at the start of a line, collects comments via
+// yaml_parser_scan_comments, and re-enables simple keys on a fresh line
+// in the block context.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	scan_mark := parser.mark
+
+	// Until the next token is not found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check if we just had a line comment under a sequence entry that
+		// looks more like a header to the following content. Similar to this:
+		//
+		// - # The comment
+		//   - Some data
+		//
+		// If so, transform the line comment to a head comment and reposition.
+		if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+			tokenA := parser.tokens[len(parser.tokens)-2]
+			tokenB := parser.tokens[len(parser.tokens)-1]
+			comment := &parser.comments[len(parser.comments)-1]
+			if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+				// If it was in the prior line, reposition so it becomes a
+				// header of the follow up token. Otherwise, keep it in place
+				// so it becomes a header of the former.
+				comment.head = comment.line
+				comment.line = nil
+				if comment.start_mark.line == parser.mark.line-1 {
+					comment.token_mark = parser.mark
+				}
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			if !yaml_parser_scan_comments(parser, scan_mark) {
+				return false
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token, filling in *token.
+// Unknown directive names produce a scanner error.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		// [Go] Discard this inline comment for the time being.
+		//if !yaml_parser_scan_line_comment(parser, start_mark) {
+		//	return false
+		//}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name into *name.  The name must be non-empty and
+// followed by a blank or break character.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE, storing the two dot-separated
+// components into *major and *minor.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+// max_number_length caps the digit count of one version component,
+// which also keeps the int8 accumulator below from overflowing.
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE into *number.
+// At least one digit is required.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token: a tag handle followed by a
+// tag prefix, separated by whitespace.  Results go to *handle and *prefix.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+// Scan an ALIAS ('*') or ANCHOR ('&') token, as selected by typ, into
+// *token.  The name must be non-empty and terminated by a blank, break,
+// or one of the indicator characters listed below.
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	/*
+	 * Check if length of the anchor is greater than 0 and it is followed by
+	 * a whitespace character or one of the indicators:
+	 *
+	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
+	 */
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+// Scan a TAG token into *token.  Handles both the verbatim '!<uri>' form
+// and the '!suffix' / '!handle!suffix' shorthand forms; the handle goes
+// into the token's value field and the suffix into its suffix field.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle into *handle: a leading '!', optional alphanumeric
+// characters, and an optional trailing '!'.  directive is true when
+// scanning inside a %TAG directive, where the stricter '!...!' (or bare
+// '!') form is required.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be a part of URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag URI into *uri.  head, if non-empty, is a prefix that was
+// already consumed by an earlier handle scan; its leading '!' byte is
+// deliberately not copied into the result.  '%xx' escape sequences are
+// decoded via yaml_parser_scan_uri_escapes.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] TODO Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode an URI-escape sequence corresponding to a single UTF-8 character.
+// Consumes one or more consecutive "%XX" triplets from the input, decodes
+// each into an octet, and appends the octets to *s.  Returns false on a
+// malformed escape, an invalid UTF-8 sequence, or a buffer refill failure.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	// w == 1024 is a sentinel meaning "leading octet not yet seen"; after
+	// the first octet it is replaced by the UTF-8 sequence length and
+	// counted down to zero.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet: need '%' plus two hex digits.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct (10xxxxxx pattern).
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers past the three-byte "%XX".
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
+
+// Scan a block scalar (literal '|' or folded '>') into a SCALAR token.
+// Handles the optional chomping ('+'/'-') and indentation (1-9) indicators
+// in either order, a trailing comment on the header line, and then the
+// scalar content itself, folding line breaks when literal is false.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	// chomping: -1 = strip, 0 = clip (default), +1 = keep.
+	// increment: explicit indentation indicator, 0 if absent.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order
+		// (indentation indicator first, then optional chomping).
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		// A comment on the header line is captured, then skipped.
+		if !yaml_parser_scan_line_comment(parser, start_mark) {
+			return false
+		}
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	// When increment is 0, indent stays 0 and is determined later by
+	// yaml_parser_scan_block_scalar_breaks from the first content line.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		// Folding only applies to '>' scalars and only between lines that
+		// are not blank-led, per the YAML folding rules.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail: clip (0) keeps the final break, keep (+1) also
+	// keeps trailing empty lines, strip (-1) drops both.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+//
+// *indent == 0 means the level is not yet known; in that case it is derived
+// afterwards from the maximum column reached (max_indent), clamped to at
+// least parser.indent+1 and at least 1.  Consumed line breaks are appended
+// to *breaks, and *end_mark tracks the position after the last break.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar (single-quoted when single is true, double-quoted
+// otherwise) into a SCALAR token.  Handles '' escaping in single-quoted
+// scalars, the full backslash escape set (including \xXX, \uXXXX, \UXXXXXXXX)
+// in double-quoted scalars, and line-break folding inside the scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		// "---" or "..." at column 0 followed by a blank terminates a
+		// document and may not appear inside a quoted scalar.
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote ('' -> ').
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				// code_length is the number of hex digits to follow for
+				// \x, \u and \U escapes; 0 for single-character escapes.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				// Eat the backslash and the escape character.
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexdecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					// Surrogate halves and values above U+10FFFF are not
+					// valid Unicode scalar values.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					// Encode the code point as UTF-8 (1-4 bytes).
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer past the hex digits.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			// A single '\n' folds to a space; additional breaks are kept.
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan a plain (unquoted) scalar into a SCALAR token.  The scalar ends at
+// a document indicator, a comment, a ": " in any context, a flow indicator
+// (",?[]{}") inside a flow collection, or a drop below the current
+// indentation level in block context.  Internal line breaks are folded.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	// The scalar content must be indented past the current block indent.
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator ("---" or "..." at column 0
+		// followed by a blank), which ends the scalar.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					// A single '\n' becomes a space; extra breaks are kept.
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	// The scalar spanned a line break, so a simple key may follow.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
+
+// Scan an inline comment that follows content on the current line and
+// record it in parser.comments attached to token_mark.  A no-op when
+// parser.newlines > 0 (the scanner is not on a content line).  Peeks up
+// to 512 bytes ahead over blanks looking for '#'.
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+	if parser.newlines > 0 {
+		return true
+	}
+
+	var start_mark yaml_mark_t
+	var text []byte
+
+	for peek := 0; peek < 512; peek++ {
+		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+			break
+		}
+		if is_blank(parser.buffer, parser.buffer_pos+peek) {
+			continue
+		}
+		if parser.buffer[parser.buffer_pos+peek] == '#' {
+			// seen marks the absolute index of the '#'; characters before
+			// it are skipped, characters from it on are collected until
+			// the line break.
+			seen := parser.mark.index+peek
+			for {
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+				if is_breakz(parser.buffer, parser.buffer_pos) {
+					if parser.mark.index >= seen {
+						break
+					}
+					if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+						return false
+					}
+					skip_line(parser)
+				} else if parser.mark.index >= seen {
+					if len(text) == 0 {
+						start_mark = parser.mark
+					}
+					text = read(parser, text)
+				} else {
+					skip(parser)
+				}
+			}
+		}
+		break
+	}
+	if len(text) > 0 {
+		parser.comments = append(parser.comments, yaml_comment_t{
+			token_mark: token_mark,
+			start_mark: start_mark,
+			line: text,
+		})
+	}
+	return true
+}
+
+// Scan stand-alone comment lines ahead of the scanner position and record
+// them in parser.comments, deciding for each run whether it is a "foot"
+// comment of the preceding token or a "head" comment of the upcoming one
+// based on blank lines, indentation, and flow-scope terminators.
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+	token := parser.tokens[len(parser.tokens)-1]
+
+	// Attach to the token before a flow entry (','), not the entry itself.
+	if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+		token = parser.tokens[len(parser.tokens)-2]
+	}
+
+	var token_mark = token.start_mark
+	var start_mark yaml_mark_t
+	var next_indent = parser.indent
+	if next_indent < 0 {
+		next_indent = 0
+	}
+
+	var recent_empty = false
+	var first_empty = parser.newlines <= 1
+
+	var line = parser.mark.line
+	var column = parser.mark.column
+
+	var text []byte
+
+	// The foot line is the place where a comment must start to
+	// still be considered as a foot of the prior content.
+	// If there's some content in the currently parsed line, then
+	// the foot is the line below it.
+	var foot_line = -1
+	if scan_mark.line > 0 {
+		foot_line = parser.mark.line-parser.newlines+1
+		if parser.newlines == 0 && parser.mark.column > 1 {
+			foot_line++
+		}
+	}
+
+	var peek = 0
+	for ; peek < 512; peek++ {
+		if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+			break
+		}
+		column++
+		if is_blank(parser.buffer, parser.buffer_pos+peek) {
+			continue
+		}
+		c := parser.buffer[parser.buffer_pos+peek]
+		var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+		if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+			// Got line break or terminator.
+			if close_flow || !recent_empty {
+				if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+					// This is the first empty line and there were no empty lines before,
+					// so this initial part of the comment is a foot of the prior token
+					// instead of being a head for the following one. Split it up.
+					// Alternatively, this might also be the last comment inside a flow
+					// scope, so it must be a footer.
+					if len(text) > 0 {
+						if start_mark.column-1 < next_indent {
+							// If dedented it's unrelated to the prior token.
+							token_mark = start_mark
+						}
+						parser.comments = append(parser.comments, yaml_comment_t{
+							scan_mark:  scan_mark,
+							token_mark: token_mark,
+							start_mark: start_mark,
+							end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
+							foot:       text,
+						})
+						scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+						token_mark = scan_mark
+						text = nil
+					}
+				} else {
+					if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+						text = append(text, '\n')
+					}
+				}
+			}
+			if !is_break(parser.buffer, parser.buffer_pos+peek) {
+				break
+			}
+			first_empty = false
+			recent_empty = true
+			column = 0
+			line++
+			continue
+		}
+
+		if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+			// The comment at the different indentation is a foot of the
+			// preceding data rather than a head of the upcoming one.
+			parser.comments = append(parser.comments, yaml_comment_t{
+				scan_mark:  scan_mark,
+				token_mark: token_mark,
+				start_mark: start_mark,
+				end_mark:   yaml_mark_t{parser.mark.index + peek, line, column},
+				foot:       text,
+			})
+			scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+			token_mark = scan_mark
+			text = nil
+		}
+
+		if parser.buffer[parser.buffer_pos+peek] != '#' {
+			break
+		}
+
+		if len(text) == 0 {
+			start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+		} else {
+			text = append(text, '\n')
+		}
+
+		recent_empty = false
+
+		// Consume until after the consumed comment line.
+		seen := parser.mark.index+peek
+		for {
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+			if is_breakz(parser.buffer, parser.buffer_pos) {
+				if parser.mark.index >= seen {
+					break
+				}
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+				skip_line(parser)
+			} else if parser.mark.index >= seen {
+				text = read(parser, text)
+			} else {
+				skip(parser)
+			}
+		}
+
+		// Restart peeking from the new scanner position.
+		peek = 0
+		column = 0
+		line = parser.mark.line
+		next_indent = parser.indent
+		if next_indent < 0 {
+			next_indent = 0
+		}
+	}
+
+	// Whatever text remains is a head comment for the upcoming token.
+	if len(text) > 0 {
+		parser.comments = append(parser.comments, yaml_comment_t{
+			scan_mark:  scan_mark,
+			token_mark: start_mark,
+			start_mark: start_mark,
+			end_mark:   yaml_mark_t{parser.mark.index + peek - 1, line, column},
+			head:       text,
+		})
+	}
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 0000000..9210ece
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+// keyList implements sort.Interface over map keys so that YAML output is
+// emitted in a stable, human-friendly order.
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+// Less orders keys: numbers/bools first (by value, then kind), then
+// strings compared with a "natural" ordering that treats embedded digit
+// runs as numbers, and everything else by reflect.Kind.
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	// Unwrap non-nil interfaces and pointers to compare the pointed-to values.
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		// Both numeric/bool: compare by value, then kind, then exact value.
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	// Natural string comparison: rune by rune, with digit runs compared
+	// as whole numbers ("a2" < "a10").
+	ar, br := []rune(a.String()), []rune(b.String())
+	digits := false
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			digits = unicode.IsDigit(ar[i])
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			// Letter vs non-letter: ordering depends on whether the
+			// previous shared rune was a digit.
+			if digits {
+				return al
+			} else {
+				return bl
+			}
+		}
+		var ai, bi int
+		var an, bn int64
+		// Leading zeros: seed the accumulators so "01" sorts after "1"
+		// only by the tie-breakers below.
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		// Accumulate the full digit runs starting at i.
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	// One string is a prefix of the other: the shorter sorts first.
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+// Bools map to 0/1 so they can participate in numeric ordering.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+// Used as an exact tie-breaker when float64 conversion in keyFloat
+// loses precision; panics on non-numeric kinds.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		// false < true.
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 0000000..b8a116b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer through the emitter's write handler; on failure it
+// records a writer error on the emitter and returns false.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// An empty buffer needs no write; report success immediately.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 0000000..8cec6da
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,698 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(v))
+	e.finish()
+	p := newParser(e.out)
+	p.textless = true
+	defer p.destroy()
+	doc := p.parse()
+	*n = *doc.Content[0]
+	return nil
+}
+
+// SetIndent changes the used indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//             Name    string
+//             Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind  Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is in.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+	return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+		n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		case 0:
+			// Special case to make the zero value convenient.
+			if n.IsZero() {
+				return nullTag
+			}
+		}
+		return ""
+	}
+	return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+	return n.Kind == ScalarNode &&
+		(shortTag(n.Tag) == strTag ||
+			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+	n.Kind = ScalarNode
+	if utf8.ValidString(s) {
+		n.Value = s
+		n.Tag = strTag
+	} else {
+		n.Value = encodeBase64(s)
+		n.Tag = binaryTag
+	}
+	if strings.Contains(n.Value, "\n") {
+		n.Style = LiteralStyle
+	}
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+
+	// InlineUnmarshalers holds indexes to inlined fields that
+	// contain unmarshaler values.
+	InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+	var v Unmarshaler
+	unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	inlineUnmarshalers := [][]int(nil)
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct, reflect.Ptr:
+				ftype := field.Type
+				for ftype.Kind() == reflect.Ptr {
+					ftype = ftype.Elem()
+				}
+				if ftype.Kind() != reflect.Struct {
+					return nil, errors.New("option ,inline may only be used on a struct or map field")
+				}
+				if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+					inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+				} else {
+					sinfo, err := getStructInfo(ftype)
+					if err != nil {
+						return nil, err
+					}
+					for _, index := range sinfo.InlineUnmarshalers {
+						inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+					}
+					for _, finfo := range sinfo.FieldsList {
+						if _, found := fieldsMap[finfo.Key]; found {
+							msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+							return nil, errors.New(msg)
+						}
+						if finfo.Inline == nil {
+							finfo.Inline = []int{i, finfo.Num}
+						} else {
+							finfo.Inline = append([]int{i}, finfo.Inline...)
+						}
+						finfo.Id = len(fieldsList)
+						fieldsMap[finfo.Key] = finfo
+						fieldsList = append(fieldsList, finfo)
+					}
+				}
+			default:
+				return nil, errors.New("option ,inline may only be used on a struct or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:          fieldsMap,
+		FieldsList:         fieldsList,
+		InlineMap:          inlineMap,
+		InlineUnmarshalers: inlineUnmarshalers,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 0000000..7c6d007
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,807 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+	yaml_TAIL_COMMENT_EVENT
+)
+
// eventStrings maps each yaml_event_type_t value to a human-readable
// name; used by yaml_event_type_t.String.
var eventStrings = []string{
	yaml_NO_EVENT:             "none",
	yaml_STREAM_START_EVENT:   "stream start",
	yaml_STREAM_END_EVENT:     "stream end",
	yaml_DOCUMENT_START_EVENT: "document start",
	yaml_DOCUMENT_END_EVENT:   "document end",
	yaml_ALIAS_EVENT:          "alias",
	yaml_SCALAR_EVENT:         "scalar",
	yaml_SEQUENCE_START_EVENT: "sequence start",
	yaml_SEQUENCE_END_EVENT:   "sequence end",
	yaml_MAPPING_START_EVENT:  "mapping start",
	yaml_MAPPING_END_EVENT:    "mapping end",
	yaml_TAIL_COMMENT_EVENT:   "tail comment",
}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
// The event structure. As with tokens, only the fields relevant to typ
// carry meaning for a given event.
type yaml_event_t struct {

	// The event type.
	typ yaml_event_type_t

	// The start and end of the event.
	start_mark, end_mark yaml_mark_t

	// The document encoding (for yaml_STREAM_START_EVENT).
	encoding yaml_encoding_t

	// The version directive (for yaml_DOCUMENT_START_EVENT).
	version_directive *yaml_version_directive_t

	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
	tag_directives []yaml_tag_directive_t

	// The comments attached to the event (head/line/foot/tail positions).
	head_comment []byte
	line_comment []byte
	foot_comment []byte
	tail_comment []byte

	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
	anchor []byte

	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	tag []byte

	// The scalar value (for yaml_SCALAR_EVENT).
	value []byte

	// Is the document start/end indicator implicit, or the tag optional?
	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
	implicit bool

	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
	quoted_implicit bool

	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	style yaml_style_t
}
+
// Convenience accessors reinterpreting the generic style field as the
// style type appropriate to the event kind.
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
// Nodes

// Standard YAML tag URIs used during tag resolution.
const (
	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

	// Not in original libyaml.
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)
+
// yaml_node_type_t distinguishes the kinds of nodes in a document graph.
type yaml_node_type_t int

// Node types.
const (
	// An empty node.
	yaml_NO_NODE yaml_node_type_t = iota

	yaml_SCALAR_NODE   // A scalar node.
	yaml_SEQUENCE_NODE // A sequence node.
	yaml_MAPPING_NODE  // A mapping node.
)
+
// An element of a sequence node.
type yaml_node_item_t int

// An element of a mapping node.
// NOTE(review): key/value look like indices into yaml_document_t.nodes —
// confirm against the document-construction code.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}
+
// The node structure. The scalar/sequence/mapping sub-structs are
// alternatives selected by typ.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for yaml_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t    // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
		pairs_start *yaml_node_pair_t    // The beginning of the stack.
		pairs_end   *yaml_node_pair_t    // The end of the stack.
		pairs_top   *yaml_node_pair_t    // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.

}
+
// The document structure.
type yaml_document_t struct {

	// The document nodes (node ids elsewhere refer to this slice).
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}
+
// The prototype of a read handler.
//
// The read handler is called when the parser needs more bytes from the
// input source. It should read up to len(buffer) bytes into buffer and
// return the number of bytes read together with any error encountered.
// NOTE(review): by analogy with io.Reader the handler presumably reports
// end of input via the returned error (io.EOF) — confirm against the
// yaml_parser_set_input* call sites. (The original C-style comment about
// size_read/return 1 did not match this Go signature.)
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
// This structure holds information about a potential simple key
// (a key without an explicit '?' indicator).
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}
+
// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota // Expect STREAM-START.

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
// This structure holds aliases data: the mapping from an anchor name to
// the node it designates.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}
+
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_reader io.Reader // io.Reader input source (stream input).
	input        []byte    // String input data (whole input in memory).
	input_pos    int       // Current position within input.

	eof bool // EOF flag

	buffer     []byte // The working buffer (decoded characters).
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	newlines int // The number of line breaks since last non-break/non-blank character

	raw_buffer     []byte // The raw buffer (undecoded input bytes).
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	// Comments

	head_comment []byte // The current head comments
	line_comment []byte // The current line comments
	foot_comment []byte // The current foot comments
	tail_comment []byte // Foot comment that happens at the end of a block.
	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)

	comments      []yaml_comment_t // The folded comments for all parsed tokens
	comments_head int              // Index of the first comment not yet consumed.

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.
	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}
+
// yaml_comment_t holds the comments collected around one token, together
// with the marks delimiting where they were scanned.
type yaml_comment_t struct {

	scan_mark  yaml_mark_t // Position where scanning for comments started
	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
	start_mark yaml_mark_t // Position of '#' comment mark
	end_mark   yaml_mark_t // Position where comment terminated

	head []byte
	line []byte
	foot []byte
}
+
// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the
// accumulated characters to the output. It should write all of buffer
// to the output and return a non-nil error on failure.
// (The original C-style @param/@returns comment described data/size
// arguments and 0/1 return codes that do not exist in this Go signature.)
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
// yaml_emitter_state_t drives the emitter's event-consuming state machine.
type yaml_emitter_state_t int

// The emitter states.
const (
	// Expect STREAM-START.
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
	yaml_EMIT_END_STATE                        // Expect nothing.
)
+
// The emitter structure.
//
// All members are internal.  Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

	// Error handling

	error   yaml_error_type_t // Error type.
	problem string            // Error description.

	// Writer stuff

	write_handler yaml_write_handler_t // Write handler.

	output_buffer *[]byte   // String output data.
	output_writer io.Writer // File output data.

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The stream encoding.

	// Emitter stuff

	canonical   bool         // If the output is in the canonical style?
	best_indent int          // The number of indentation spaces.
	best_width  int          // The preferred width of the output lines.
	unicode     bool         // Allow unescaped non-ASCII characters?
	line_break  yaml_break_t // The preferred line break.

	state  yaml_emitter_state_t   // The current emitter state.
	states []yaml_emitter_state_t // The stack of states.

	events      []yaml_event_t // The event queue.
	events_head int            // The head of the event queue.

	indents []int // The stack of indentation levels.

	tag_directives []yaml_tag_directive_t // The list of tag directives.

	indent int // The current indentation level.

	flow_level int // The current flow level.

	root_context       bool // Is it the document root context?
	sequence_context   bool // Is it a sequence context?
	mapping_context    bool // Is it a mapping context?
	simple_key_context bool // Is it a simple mapping key context?

	line       int  // The current line.
	column     int  // The current column.
	whitespace bool // If the last character was a whitespace?
	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
	open_ended bool // If an explicit document end is required?

	space_above bool // Is there's an empty line above?
	foot_indent int  // The indent used to write the foot comment above, or -1 if none.

	// Anchor analysis.
	anchor_data struct {
		anchor []byte // The anchor value.
		alias  bool   // Is it an alias?
	}

	// Tag analysis.
	tag_data struct {
		handle []byte // The tag handle.
		suffix []byte // The tag suffix.
	}

	// Scalar analysis.
	scalar_data struct {
		value                 []byte              // The scalar value.
		multiline             bool                // Does the scalar contain line breaks?
		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
		style                 yaml_scalar_style_t // The output style.
	}

	// Comments
	head_comment []byte
	line_comment []byte
	foot_comment []byte
	tail_comment []byte

	key_line_comment []byte

	// Dumper stuff

	opened bool // If the stream was already opened?
	closed bool // If the stream was already closed?

	// The information associated with the document nodes.
	anchors *struct {
		references int  // The number of references.
		anchor     int  // The anchor id.
		serialized bool // If the node has been emitted?
	}

	last_anchor_id int // The last assigned anchor id.

	document *yaml_document_t // The currently emitted document.
}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 0000000..e88f9c5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+// 
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+// 
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+// 
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// 
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
// Buffer and stack sizing constants.
const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer.
	// NOTE(review): the *3 factor presumably covers the worst-case
	// expansion when decoding the raw input into UTF-8 — confirm against
	// the reader implementation.
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The size of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)
+
// Check if the byte at position i is alphanumeric, '_', or '-'
// (the characters permitted in YAML anchor/alias names and directives).
func is_alpha(b []byte, i int) bool {
	switch c := b[i]; {
	case c >= '0' && c <= '9',
		c >= 'A' && c <= 'Z',
		c >= 'a' && c <= 'z',
		c == '_', c == '-':
		return true
	}
	return false
}
+
// Check if the byte at position i is an ASCII decimal digit.
func is_digit(b []byte, i int) bool {
	c := b[i]
	return '0' <= c && c <= '9'
}
+
// Get the numeric value of the decimal digit at position i.
// The caller is expected to have checked is_digit first.
func as_digit(b []byte, i int) int {
	d := int(b[i])
	return d - '0'
}
+
// Check if the byte at position i is a hexadecimal digit (either case).
func is_hex(b []byte, i int) bool {
	switch c := b[i]; {
	case c >= '0' && c <= '9',
		c >= 'A' && c <= 'F',
		c >= 'a' && c <= 'f':
		return true
	}
	return false
}
+
// Get the numeric value of the hex digit at position i.
// The caller is expected to have checked is_hex first.
func as_hex(b []byte, i int) int {
	switch c := int(b[i]); {
	case c >= 'a' && c <= 'f':
		return c - 'a' + 10
	case c >= 'A' && c <= 'F':
		return c - 'A' + 10
	default:
		return c - '0'
	}
}
+
// Check if the byte at position i is within the 7-bit ASCII range.
func is_ascii(b []byte, i int) bool {
	const maxASCII = 0x7F
	return b[i] <= maxASCII
}
+
// Check if the UTF-8 character starting at position i can be printed
// unescaped. Multi-byte cases inspect the continuation bytes, so the
// buffer must extend far enough past i for those leading bytes.
func is_printable(b []byte, i int) bool {
	switch c := b[i]; {
	case c == 0x0A: // line feed
		return true
	case c >= 0x20 && c <= 0x7E: // printable ASCII
		return true
	case c == 0xC2: // two-byte sequences from #xA0 upward
		return b[i+1] >= 0xA0
	case c > 0xC2 && c < 0xED: // sequences below the surrogate range
		return true
	case c == 0xED: // exclude surrogates (#xD800-#xDFFF)
		return b[i+1] < 0xA0
	case c == 0xEE:
		return true
	case c == 0xEF: // #xE000-#xFFFD, minus #xFEFF, #xFFFE, #xFFFF
		if b[i+1] == 0xBB && b[i+2] == 0xBF {
			return false // BOM (#xFEFF)
		}
		if b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF) {
			return false // #xFFFE / #xFFFF
		}
		return true
	}
	return false
}
+
// Check if the byte at position i is the NUL terminator.
func is_z(b []byte, i int) bool {
	return b[i] == '\x00'
}
+
// Check if the beginning of the buffer is a BOM (UTF-8 byte order mark,
// EF BB BF).
// NOTE(review): the index i is ignored — this always inspects b[0:3],
// per the "beginning of the buffer" contract. Confirm callers only pass
// i at the start of the buffer.
func is_bom(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
+
// Check if the byte at position i is an ASCII space.
func is_space(b []byte, i int) bool {
	c := b[i]
	return c == ' '
}
+
// Check if the byte at position i is a horizontal tab.
func is_tab(b []byte, i int) bool {
	c := b[i]
	return c == '\t'
}
+
// Check if the byte at position i is blank (space or tab).
// Kept inlined rather than composed from is_space/is_tab, matching the
// surrounding helpers.
func is_blank(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\t':
		return true
	}
	return false
}
+
// Check if the character at position i is a line break:
// CR, LF, NEL (#x85), LS (#x2028), or PS (#x2029).
// Continuation bytes are only read when the leading byte requires them.
func is_break(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': // CR (#xD), LF (#xA)
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
+
// is_crlf reports whether a CR LF pair starts at position i.
// b[i+1] is only read when b[i] is CR, mirroring the short-circuit of
// the original expression.
func is_crlf(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
+
// Check if the character at position i is a line break or NUL.
// Equivalent to is_break(b, i) || is_z(b, i), inlined as in the
// surrounding helpers.
func is_breakz(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n', 0: // CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
+
// Check if the character at position i is a space, line break, or NUL.
// Equivalent to is_space(b, i) || is_breakz(b, i), inlined as in the
// surrounding helpers.
func is_spacez(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\r', '\n', 0: // space, CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
+
// Check if the character at position i is a space, tab, line break, or
// NUL. Equivalent to is_blank(b, i) || is_breakz(b, i), inlined as in
// the surrounding helpers.
func is_blankz(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\t', '\r', '\n', 0: // space, tab, CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
+
// Determine the width of the character: the number of bytes in the UTF-8
// sequence whose leading byte is b, or 0 if b is not a valid leading
// byte (e.g. a 10xxxxxx continuation byte).
func width(b byte) int {
	// Don't replace these by a switch without first
	// confirming that it is being inlined.
	if b&0x80 == 0x00 { // 0xxxxxxx: ASCII
		return 1
	}
	if b&0xE0 == 0xC0 { // 110xxxxx: 2-byte sequence
		return 2
	}
	if b&0xF0 == 0xE0 { // 1110xxxx: 3-byte sequence
		return 3
	}
	if b&0xF8 == 0xF0 { // 11110xxx: 4-byte sequence
		return 4
	}
	return 0

}